1 /*
2 * H.264/HEVC hardware encoding using nvidia nvenc
3 * Copyright (c) 2016 Timo Rothenpieler <timo@rothenpieler.org>
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include "config.h"
23
24 #include "nvenc.h"
25
26 #include "libavutil/hwcontext_cuda.h"
27 #include "libavutil/hwcontext.h"
28 #include "libavutil/cuda_check.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/avassert.h"
31 #include "libavutil/mem.h"
32 #include "libavutil/pixdesc.h"
33 #include "internal.h"
34 #include "packet_internal.h"
35
36 #define CHECK_CU(x) FF_CUDA_CHECK_DL(avctx, dl_fn->cuda_dl, x)
37
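/* Minimum CUDA compute capability required for NVENC, packed as
 * (major << 4) | minor, i.e. SM 3.0. */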
38 #define NVENC_CAP 0x30
39 #define IS_CBR(rc) (rc == NV_ENC_PARAMS_RC_CBR || \
40 rc == NV_ENC_PARAMS_RC_CBR_LOWDELAY_HQ || \
41 rc == NV_ENC_PARAMS_RC_CBR_HQ)
42
43 const enum AVPixelFormat ff_nvenc_pix_fmts[] = {
44 AV_PIX_FMT_YUV420P,
45 AV_PIX_FMT_NV12,
46 AV_PIX_FMT_P010,
47 AV_PIX_FMT_YUV444P,
48 AV_PIX_FMT_P016, // Truncated to 10bits
49 AV_PIX_FMT_YUV444P16, // Truncated to 10bits
50 AV_PIX_FMT_0RGB32,
51 AV_PIX_FMT_0BGR32,
52 AV_PIX_FMT_CUDA,
53 #if CONFIG_D3D11VA
54 AV_PIX_FMT_D3D11,
55 #endif
56 AV_PIX_FMT_NONE
57 };
58
59 const AVCodecHWConfigInternal *ff_nvenc_hw_configs[] = {
60 HW_CONFIG_ENCODER_FRAMES(CUDA, CUDA),
61 HW_CONFIG_ENCODER_DEVICE(NONE, CUDA),
62 #if CONFIG_D3D11VA
63 HW_CONFIG_ENCODER_FRAMES(D3D11, D3D11VA),
64 HW_CONFIG_ENCODER_DEVICE(NONE, D3D11VA),
65 #endif
66 NULL,
67 };
68
69 #define IS_10BIT(pix_fmt) (pix_fmt == AV_PIX_FMT_P010 || \
70 pix_fmt == AV_PIX_FMT_P016 || \
71 pix_fmt == AV_PIX_FMT_YUV444P16)
72
73 #define IS_YUV444(pix_fmt) (pix_fmt == AV_PIX_FMT_YUV444P || \
74 pix_fmt == AV_PIX_FMT_YUV444P16)
75
76 static const struct {
77 NVENCSTATUS nverr;
78 int averr;
79 const char *desc;
80 } nvenc_errors[] = {
81 { NV_ENC_SUCCESS, 0, "success" },
82 { NV_ENC_ERR_NO_ENCODE_DEVICE, AVERROR(ENOENT), "no encode device" },
83 { NV_ENC_ERR_UNSUPPORTED_DEVICE, AVERROR(ENOSYS), "unsupported device" },
84 { NV_ENC_ERR_INVALID_ENCODERDEVICE, AVERROR(EINVAL), "invalid encoder device" },
85 { NV_ENC_ERR_INVALID_DEVICE, AVERROR(EINVAL), "invalid device" },
86 { NV_ENC_ERR_DEVICE_NOT_EXIST, AVERROR(EIO), "device does not exist" },
87 { NV_ENC_ERR_INVALID_PTR, AVERROR(EFAULT), "invalid ptr" },
88 { NV_ENC_ERR_INVALID_EVENT, AVERROR(EINVAL), "invalid event" },
89 { NV_ENC_ERR_INVALID_PARAM, AVERROR(EINVAL), "invalid param" },
90 { NV_ENC_ERR_INVALID_CALL, AVERROR(EINVAL), "invalid call" },
91 { NV_ENC_ERR_OUT_OF_MEMORY, AVERROR(ENOMEM), "out of memory" },
92 { NV_ENC_ERR_ENCODER_NOT_INITIALIZED, AVERROR(EINVAL), "encoder not initialized" },
93 { NV_ENC_ERR_UNSUPPORTED_PARAM, AVERROR(ENOSYS), "unsupported param" },
94 { NV_ENC_ERR_LOCK_BUSY, AVERROR(EAGAIN), "lock busy" },
95 { NV_ENC_ERR_NOT_ENOUGH_BUFFER, AVERROR_BUFFER_TOO_SMALL, "not enough buffer"},
96 { NV_ENC_ERR_INVALID_VERSION, AVERROR(EINVAL), "invalid version" },
97 { NV_ENC_ERR_MAP_FAILED, AVERROR(EIO), "map failed" },
98 { NV_ENC_ERR_NEED_MORE_INPUT, AVERROR(EAGAIN), "need more input" },
99 { NV_ENC_ERR_ENCODER_BUSY, AVERROR(EAGAIN), "encoder busy" },
100 { NV_ENC_ERR_EVENT_NOT_REGISTERD, AVERROR(EBADF), "event not registered" },
101 { NV_ENC_ERR_GENERIC, AVERROR_UNKNOWN, "generic error" },
102 { NV_ENC_ERR_INCOMPATIBLE_CLIENT_KEY, AVERROR(EINVAL), "incompatible client key" },
103 { NV_ENC_ERR_UNIMPLEMENTED, AVERROR(ENOSYS), "unimplemented" },
104 { NV_ENC_ERR_RESOURCE_REGISTER_FAILED, AVERROR(EIO), "resource register failed" },
105 { NV_ENC_ERR_RESOURCE_NOT_REGISTERED, AVERROR(EBADF), "resource not registered" },
106 { NV_ENC_ERR_RESOURCE_NOT_MAPPED, AVERROR(EBADF), "resource not mapped" },
107 };
108
static int nvenc_map_error(NVENCSTATUS err, const char **desc)
110 {
111 int i;
112 for (i = 0; i < FF_ARRAY_ELEMS(nvenc_errors); i++) {
113 if (nvenc_errors[i].nverr == err) {
114 if (desc)
115 *desc = nvenc_errors[i].desc;
116 return nvenc_errors[i].averr;
117 }
118 }
119 if (desc)
120 *desc = "unknown error";
121 return AVERROR_UNKNOWN;
122 }
123
static int nvenc_print_error(AVCodecContext *avctx, NVENCSTATUS err,
                             const char *error_string)
126 {
127 const char *desc;
128 const char *details = "(no details)";
129 int ret = nvenc_map_error(err, &desc);
130
131 #ifdef NVENC_HAVE_GETLASTERRORSTRING
132 NvencContext *ctx = avctx->priv_data;
133 NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &ctx->nvenc_dload_funcs.nvenc_funcs;
134
135 if (p_nvenc && ctx->nvencoder)
136 details = p_nvenc->nvEncGetLastErrorString(ctx->nvencoder);
137 #endif
138
139 av_log(avctx, AV_LOG_ERROR, "%s: %s (%d): %s\n", error_string, desc, err, details);
140
141 return ret;
142 }
143
static void nvenc_print_driver_requirement(AVCodecContext *avctx, int level)
145 {
146 #if NVENCAPI_CHECK_VERSION(9, 2)
147 const char *minver = "(unknown)";
148 #elif NVENCAPI_CHECK_VERSION(9, 1)
149 # if defined(_WIN32) || defined(__CYGWIN__)
150 const char *minver = "436.15";
151 # else
152 const char *minver = "435.21";
153 # endif
154 #elif NVENCAPI_CHECK_VERSION(9, 0)
155 # if defined(_WIN32) || defined(__CYGWIN__)
156 const char *minver = "418.81";
157 # else
158 const char *minver = "418.30";
159 # endif
160 #elif NVENCAPI_CHECK_VERSION(8, 2)
161 # if defined(_WIN32) || defined(__CYGWIN__)
162 const char *minver = "397.93";
163 # else
164 const char *minver = "396.24";
# endif
166 #elif NVENCAPI_CHECK_VERSION(8, 1)
167 # if defined(_WIN32) || defined(__CYGWIN__)
168 const char *minver = "390.77";
169 # else
170 const char *minver = "390.25";
171 # endif
172 #else
173 # if defined(_WIN32) || defined(__CYGWIN__)
174 const char *minver = "378.66";
175 # else
176 const char *minver = "378.13";
177 # endif
178 #endif
179 av_log(avctx, level, "The minimum required Nvidia driver for nvenc is %s or newer\n", minver);
180 }
181
static av_cold int nvenc_load_libraries(AVCodecContext *avctx)
183 {
184 NvencContext *ctx = avctx->priv_data;
185 NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
186 NVENCSTATUS err;
187 uint32_t nvenc_max_ver;
188 int ret;
189
190 ret = cuda_load_functions(&dl_fn->cuda_dl, avctx);
191 if (ret < 0)
192 return ret;
193
194 ret = nvenc_load_functions(&dl_fn->nvenc_dl, avctx);
195 if (ret < 0) {
196 nvenc_print_driver_requirement(avctx, AV_LOG_ERROR);
197 return ret;
198 }
199
200 err = dl_fn->nvenc_dl->NvEncodeAPIGetMaxSupportedVersion(&nvenc_max_ver);
201 if (err != NV_ENC_SUCCESS)
202 return nvenc_print_error(avctx, err, "Failed to query nvenc max version");
203
204 av_log(avctx, AV_LOG_VERBOSE, "Loaded Nvenc version %d.%d\n", nvenc_max_ver >> 4, nvenc_max_ver & 0xf);
205
206 if ((NVENCAPI_MAJOR_VERSION << 4 | NVENCAPI_MINOR_VERSION) > nvenc_max_ver) {
207 av_log(avctx, AV_LOG_ERROR, "Driver does not support the required nvenc API version. "
208 "Required: %d.%d Found: %d.%d\n",
209 NVENCAPI_MAJOR_VERSION, NVENCAPI_MINOR_VERSION,
210 nvenc_max_ver >> 4, nvenc_max_ver & 0xf);
211 nvenc_print_driver_requirement(avctx, AV_LOG_ERROR);
212 return AVERROR(ENOSYS);
213 }
214
215 dl_fn->nvenc_funcs.version = NV_ENCODE_API_FUNCTION_LIST_VER;
216
217 err = dl_fn->nvenc_dl->NvEncodeAPICreateInstance(&dl_fn->nvenc_funcs);
218 if (err != NV_ENC_SUCCESS)
219 return nvenc_print_error(avctx, err, "Failed to create nvenc instance");
220
221 av_log(avctx, AV_LOG_VERBOSE, "Nvenc initialized successfully\n");
222
223 return 0;
224 }
225
static int nvenc_push_context(AVCodecContext *avctx)
227 {
228 NvencContext *ctx = avctx->priv_data;
229 NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
230
231 if (ctx->d3d11_device)
232 return 0;
233
234 return CHECK_CU(dl_fn->cuda_dl->cuCtxPushCurrent(ctx->cu_context));
235 }
236
static int nvenc_pop_context(AVCodecContext *avctx)
238 {
239 NvencContext *ctx = avctx->priv_data;
240 NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
241 CUcontext dummy;
242
243 if (ctx->d3d11_device)
244 return 0;
245
246 return CHECK_CU(dl_fn->cuda_dl->cuCtxPopCurrent(&dummy));
247 }
248
static av_cold int nvenc_open_session(AVCodecContext *avctx)
250 {
251 NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS params = { 0 };
252 NvencContext *ctx = avctx->priv_data;
253 NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &ctx->nvenc_dload_funcs.nvenc_funcs;
254 NVENCSTATUS ret;
255
256 params.version = NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER;
257 params.apiVersion = NVENCAPI_VERSION;
258 if (ctx->d3d11_device) {
259 params.device = ctx->d3d11_device;
260 params.deviceType = NV_ENC_DEVICE_TYPE_DIRECTX;
261 } else {
262 params.device = ctx->cu_context;
263 params.deviceType = NV_ENC_DEVICE_TYPE_CUDA;
264 }
265
    ret = p_nvenc->nvEncOpenEncodeSessionEx(&params, &ctx->nvencoder);
267 if (ret != NV_ENC_SUCCESS) {
268 ctx->nvencoder = NULL;
269 return nvenc_print_error(avctx, ret, "OpenEncodeSessionEx failed");
270 }
271
272 return 0;
273 }
274
static int nvenc_check_codec_support(AVCodecContext *avctx)
276 {
277 NvencContext *ctx = avctx->priv_data;
278 NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &ctx->nvenc_dload_funcs.nvenc_funcs;
279 int i, ret, count = 0;
280 GUID *guids = NULL;
281
282 ret = p_nvenc->nvEncGetEncodeGUIDCount(ctx->nvencoder, &count);
283
284 if (ret != NV_ENC_SUCCESS || !count)
285 return AVERROR(ENOSYS);
286
287 guids = av_malloc(count * sizeof(GUID));
288 if (!guids)
289 return AVERROR(ENOMEM);
290
291 ret = p_nvenc->nvEncGetEncodeGUIDs(ctx->nvencoder, guids, count, &count);
292 if (ret != NV_ENC_SUCCESS) {
293 ret = AVERROR(ENOSYS);
294 goto fail;
295 }
296
297 ret = AVERROR(ENOSYS);
298 for (i = 0; i < count; i++) {
299 if (!memcmp(&guids[i], &ctx->init_encode_params.encodeGUID, sizeof(*guids))) {
300 ret = 0;
301 break;
302 }
303 }
304
305 fail:
306 av_free(guids);
307
308 return ret;
309 }
310
static int nvenc_check_cap(AVCodecContext *avctx, NV_ENC_CAPS cap)
312 {
313 NvencContext *ctx = avctx->priv_data;
314 NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &ctx->nvenc_dload_funcs.nvenc_funcs;
315 NV_ENC_CAPS_PARAM params = { 0 };
316 int ret, val = 0;
317
318 params.version = NV_ENC_CAPS_PARAM_VER;
319 params.capsToQuery = cap;
320
    ret = p_nvenc->nvEncGetEncodeCaps(ctx->nvencoder, ctx->init_encode_params.encodeGUID, &params, &val);
322
323 if (ret == NV_ENC_SUCCESS)
324 return val;
325 return 0;
326 }
327
static int nvenc_check_capabilities(AVCodecContext *avctx)
329 {
330 NvencContext *ctx = avctx->priv_data;
331 int ret;
332
333 ret = nvenc_check_codec_support(avctx);
334 if (ret < 0) {
335 av_log(avctx, AV_LOG_WARNING, "Codec not supported\n");
336 return ret;
337 }
338
339 ret = nvenc_check_cap(avctx, NV_ENC_CAPS_SUPPORT_YUV444_ENCODE);
340 if (IS_YUV444(ctx->data_pix_fmt) && ret <= 0) {
341 av_log(avctx, AV_LOG_WARNING, "YUV444P not supported\n");
342 return AVERROR(ENOSYS);
343 }
344
345 ret = nvenc_check_cap(avctx, NV_ENC_CAPS_SUPPORT_LOSSLESS_ENCODE);
346 if (ctx->preset >= PRESET_LOSSLESS_DEFAULT && ret <= 0) {
347 av_log(avctx, AV_LOG_WARNING, "Lossless encoding not supported\n");
348 return AVERROR(ENOSYS);
349 }
350
351 ret = nvenc_check_cap(avctx, NV_ENC_CAPS_WIDTH_MAX);
352 if (ret < avctx->width) {
353 av_log(avctx, AV_LOG_WARNING, "Width %d exceeds %d\n",
354 avctx->width, ret);
355 return AVERROR(ENOSYS);
356 }
357
358 ret = nvenc_check_cap(avctx, NV_ENC_CAPS_HEIGHT_MAX);
359 if (ret < avctx->height) {
360 av_log(avctx, AV_LOG_WARNING, "Height %d exceeds %d\n",
361 avctx->height, ret);
362 return AVERROR(ENOSYS);
363 }
364
365 ret = nvenc_check_cap(avctx, NV_ENC_CAPS_NUM_MAX_BFRAMES);
366 if (ret < avctx->max_b_frames) {
367 av_log(avctx, AV_LOG_WARNING, "Max B-frames %d exceed %d\n",
368 avctx->max_b_frames, ret);
369
370 return AVERROR(ENOSYS);
371 }
372
373 ret = nvenc_check_cap(avctx, NV_ENC_CAPS_SUPPORT_FIELD_ENCODING);
374 if (ret < 1 && avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
375 av_log(avctx, AV_LOG_WARNING,
376 "Interlaced encoding is not supported. Supported level: %d\n",
377 ret);
378 return AVERROR(ENOSYS);
379 }
380
381 ret = nvenc_check_cap(avctx, NV_ENC_CAPS_SUPPORT_10BIT_ENCODE);
382 if (IS_10BIT(ctx->data_pix_fmt) && ret <= 0) {
383 av_log(avctx, AV_LOG_WARNING, "10 bit encode not supported\n");
384 return AVERROR(ENOSYS);
385 }
386
387 ret = nvenc_check_cap(avctx, NV_ENC_CAPS_SUPPORT_LOOKAHEAD);
388 if (ctx->rc_lookahead > 0 && ret <= 0) {
389 av_log(avctx, AV_LOG_WARNING, "RC lookahead not supported\n");
390 return AVERROR(ENOSYS);
391 }
392
393 ret = nvenc_check_cap(avctx, NV_ENC_CAPS_SUPPORT_TEMPORAL_AQ);
394 if (ctx->temporal_aq > 0 && ret <= 0) {
395 av_log(avctx, AV_LOG_WARNING, "Temporal AQ not supported\n");
396 return AVERROR(ENOSYS);
397 }
398
399 ret = nvenc_check_cap(avctx, NV_ENC_CAPS_SUPPORT_WEIGHTED_PREDICTION);
400 if (ctx->weighted_pred > 0 && ret <= 0) {
401 av_log (avctx, AV_LOG_WARNING, "Weighted Prediction not supported\n");
402 return AVERROR(ENOSYS);
403 }
404
405 ret = nvenc_check_cap(avctx, NV_ENC_CAPS_SUPPORT_CABAC);
406 if (ctx->coder == NV_ENC_H264_ENTROPY_CODING_MODE_CABAC && ret <= 0) {
407 av_log(avctx, AV_LOG_WARNING, "CABAC entropy coding not supported\n");
408 return AVERROR(ENOSYS);
409 }
410
411 #ifdef NVENC_HAVE_BFRAME_REF_MODE
412 ret = nvenc_check_cap(avctx, NV_ENC_CAPS_SUPPORT_BFRAME_REF_MODE);
413 if (ctx->b_ref_mode == NV_ENC_BFRAME_REF_MODE_EACH && ret != 1) {
414 av_log(avctx, AV_LOG_WARNING, "Each B frame as reference is not supported\n");
415 return AVERROR(ENOSYS);
416 } else if (ctx->b_ref_mode != NV_ENC_BFRAME_REF_MODE_DISABLED && ret == 0) {
417 av_log(avctx, AV_LOG_WARNING, "B frames as references are not supported\n");
418 return AVERROR(ENOSYS);
419 }
420 #else
421 if (ctx->b_ref_mode != 0) {
422 av_log(avctx, AV_LOG_WARNING, "B frames as references need SDK 8.1 at build time\n");
423 return AVERROR(ENOSYS);
424 }
425 #endif
426
427 #ifdef NVENC_HAVE_MULTIPLE_REF_FRAMES
428 ret = nvenc_check_cap(avctx, NV_ENC_CAPS_SUPPORT_MULTIPLE_REF_FRAMES);
429 if(avctx->refs != NV_ENC_NUM_REF_FRAMES_AUTOSELECT && ret <= 0) {
430 av_log(avctx, AV_LOG_WARNING, "Multiple reference frames are not supported by the device\n");
431 return AVERROR(ENOSYS);
432 }
433 #else
434 if(avctx->refs != 0) {
435 av_log(avctx, AV_LOG_WARNING, "Multiple reference frames need SDK 9.1 at build time\n");
436 return AVERROR(ENOSYS);
437 }
438 #endif
439
440 ctx->support_dyn_bitrate = nvenc_check_cap(avctx, NV_ENC_CAPS_SUPPORT_DYN_BITRATE_CHANGE);
441
442 return 0;
443 }
444
static av_cold int nvenc_check_device(AVCodecContext *avctx, int idx)
446 {
447 NvencContext *ctx = avctx->priv_data;
448 NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
449 NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &dl_fn->nvenc_funcs;
450 char name[128] = { 0};
451 int major, minor, ret;
452 CUdevice cu_device;
453 int loglevel = AV_LOG_VERBOSE;
454
455 if (ctx->device == LIST_DEVICES)
456 loglevel = AV_LOG_INFO;
457
458 ret = CHECK_CU(dl_fn->cuda_dl->cuDeviceGet(&cu_device, idx));
459 if (ret < 0)
460 return ret;
461
462 ret = CHECK_CU(dl_fn->cuda_dl->cuDeviceGetName(name, sizeof(name), cu_device));
463 if (ret < 0)
464 return ret;
465
466 ret = CHECK_CU(dl_fn->cuda_dl->cuDeviceComputeCapability(&major, &minor, cu_device));
467 if (ret < 0)
468 return ret;
469
470 av_log(avctx, loglevel, "[ GPU #%d - < %s > has Compute SM %d.%d ]\n", idx, name, major, minor);
471 if (((major << 4) | minor) < NVENC_CAP) {
472 av_log(avctx, loglevel, "does not support NVENC\n");
473 goto fail;
474 }
475
476 if (ctx->device != idx && ctx->device != ANY_DEVICE)
477 return -1;
478
479 ret = CHECK_CU(dl_fn->cuda_dl->cuCtxCreate(&ctx->cu_context_internal, 0, cu_device));
480 if (ret < 0)
481 goto fail;
482
483 ctx->cu_context = ctx->cu_context_internal;
484 ctx->cu_stream = NULL;
485
486 if ((ret = nvenc_pop_context(avctx)) < 0)
487 goto fail2;
488
489 if ((ret = nvenc_open_session(avctx)) < 0)
490 goto fail2;
491
492 if ((ret = nvenc_check_capabilities(avctx)) < 0)
493 goto fail3;
494
495 av_log(avctx, loglevel, "supports NVENC\n");
496
497 dl_fn->nvenc_device_count++;
498
499 if (ctx->device == idx || ctx->device == ANY_DEVICE)
500 return 0;
501
502 fail3:
503 if ((ret = nvenc_push_context(avctx)) < 0)
504 return ret;
505
506 p_nvenc->nvEncDestroyEncoder(ctx->nvencoder);
507 ctx->nvencoder = NULL;
508
509 if ((ret = nvenc_pop_context(avctx)) < 0)
510 return ret;
511
512 fail2:
513 CHECK_CU(dl_fn->cuda_dl->cuCtxDestroy(ctx->cu_context_internal));
514 ctx->cu_context_internal = NULL;
515
516 fail:
517 return AVERROR(ENOSYS);
518 }
519
static av_cold int nvenc_setup_device(AVCodecContext *avctx)
521 {
522 NvencContext *ctx = avctx->priv_data;
523 NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
524
525 switch (avctx->codec->id) {
526 case AV_CODEC_ID_H264:
527 ctx->init_encode_params.encodeGUID = NV_ENC_CODEC_H264_GUID;
528 break;
529 case AV_CODEC_ID_HEVC:
530 ctx->init_encode_params.encodeGUID = NV_ENC_CODEC_HEVC_GUID;
531 break;
532 default:
533 return AVERROR_BUG;
534 }
535
536 if (avctx->pix_fmt == AV_PIX_FMT_CUDA || avctx->pix_fmt == AV_PIX_FMT_D3D11 || avctx->hw_frames_ctx || avctx->hw_device_ctx) {
537 AVHWFramesContext *frames_ctx;
538 AVHWDeviceContext *hwdev_ctx;
539 AVCUDADeviceContext *cuda_device_hwctx = NULL;
540 #if CONFIG_D3D11VA
541 AVD3D11VADeviceContext *d3d11_device_hwctx = NULL;
542 #endif
543 int ret;
544
545 if (avctx->hw_frames_ctx) {
546 frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
547 if (frames_ctx->format == AV_PIX_FMT_CUDA)
548 cuda_device_hwctx = frames_ctx->device_ctx->hwctx;
549 #if CONFIG_D3D11VA
550 else if (frames_ctx->format == AV_PIX_FMT_D3D11)
551 d3d11_device_hwctx = frames_ctx->device_ctx->hwctx;
552 #endif
553 else
554 return AVERROR(EINVAL);
555 } else if (avctx->hw_device_ctx) {
556 hwdev_ctx = (AVHWDeviceContext*)avctx->hw_device_ctx->data;
557 if (hwdev_ctx->type == AV_HWDEVICE_TYPE_CUDA)
558 cuda_device_hwctx = hwdev_ctx->hwctx;
559 #if CONFIG_D3D11VA
560 else if (hwdev_ctx->type == AV_HWDEVICE_TYPE_D3D11VA)
561 d3d11_device_hwctx = hwdev_ctx->hwctx;
562 #endif
563 else
564 return AVERROR(EINVAL);
565 } else {
566 return AVERROR(EINVAL);
567 }
568
569 if (cuda_device_hwctx) {
570 ctx->cu_context = cuda_device_hwctx->cuda_ctx;
571 ctx->cu_stream = cuda_device_hwctx->stream;
572 }
573 #if CONFIG_D3D11VA
574 else if (d3d11_device_hwctx) {
575 ctx->d3d11_device = d3d11_device_hwctx->device;
576 ID3D11Device_AddRef(ctx->d3d11_device);
577 }
578 #endif
579
580 ret = nvenc_open_session(avctx);
581 if (ret < 0)
582 return ret;
583
584 ret = nvenc_check_capabilities(avctx);
585 if (ret < 0) {
586 av_log(avctx, AV_LOG_FATAL, "Provided device doesn't support required NVENC features\n");
587 return ret;
588 }
589 } else {
590 int i, nb_devices = 0;
591
592 if (CHECK_CU(dl_fn->cuda_dl->cuInit(0)) < 0)
593 return AVERROR_UNKNOWN;
594
595 if (CHECK_CU(dl_fn->cuda_dl->cuDeviceGetCount(&nb_devices)) < 0)
596 return AVERROR_UNKNOWN;
597
598 if (!nb_devices) {
599 av_log(avctx, AV_LOG_FATAL, "No CUDA capable devices found\n");
600 return AVERROR_EXTERNAL;
601 }
602
603 av_log(avctx, AV_LOG_VERBOSE, "%d CUDA capable devices found\n", nb_devices);
604
605 dl_fn->nvenc_device_count = 0;
606 for (i = 0; i < nb_devices; ++i) {
607 if ((nvenc_check_device(avctx, i)) >= 0 && ctx->device != LIST_DEVICES)
608 return 0;
609 }
610
611 if (ctx->device == LIST_DEVICES)
612 return AVERROR_EXIT;
613
614 if (!dl_fn->nvenc_device_count) {
615 av_log(avctx, AV_LOG_FATAL, "No capable devices found\n");
616 return AVERROR_EXTERNAL;
617 }
618
619 av_log(avctx, AV_LOG_FATAL, "Requested GPU %d, but only %d GPUs are available!\n", ctx->device, nb_devices);
620 return AVERROR(EINVAL);
621 }
622
623 return 0;
624 }
625
626 typedef struct GUIDTuple {
627 const GUID guid;
628 int flags;
629 } GUIDTuple;
630
631 #define PRESET_ALIAS(alias, name, ...) \
632 [PRESET_ ## alias] = { NV_ENC_PRESET_ ## name ## _GUID, __VA_ARGS__ }
633
634 #define PRESET(name, ...) PRESET_ALIAS(name, name, __VA_ARGS__)
635
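/* Map the user-selected preset to an NVENC preset GUID plus internal flags
 * (one/two pass, low latency, lossless). */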
static void nvenc_map_preset(NvencContext *ctx)
637 {
638 GUIDTuple presets[] = {
639 PRESET(DEFAULT),
640 PRESET(HP),
641 PRESET(HQ),
642 PRESET(BD),
643 PRESET_ALIAS(SLOW, HQ, NVENC_TWO_PASSES),
644 PRESET_ALIAS(MEDIUM, HQ, NVENC_ONE_PASS),
645 PRESET_ALIAS(FAST, HP, NVENC_ONE_PASS),
646 PRESET(LOW_LATENCY_DEFAULT, NVENC_LOWLATENCY),
647 PRESET(LOW_LATENCY_HP, NVENC_LOWLATENCY),
648 PRESET(LOW_LATENCY_HQ, NVENC_LOWLATENCY),
649 PRESET(LOSSLESS_DEFAULT, NVENC_LOSSLESS),
650 PRESET(LOSSLESS_HP, NVENC_LOSSLESS),
651 };
652
653 GUIDTuple *t = &presets[ctx->preset];
654
655 ctx->init_encode_params.presetGUID = t->guid;
656 ctx->flags = t->flags;
657 }
658
659 #undef PRESET
660 #undef PRESET_ALIAS
661
static av_cold void set_constqp(AVCodecContext *avctx)
663 {
664 NvencContext *ctx = avctx->priv_data;
665 NV_ENC_RC_PARAMS *rc = &ctx->encode_config.rcParams;
666
667 rc->rateControlMode = NV_ENC_PARAMS_RC_CONSTQP;
668
669 if (ctx->init_qp_p >= 0) {
670 rc->constQP.qpInterP = ctx->init_qp_p;
671 if (ctx->init_qp_i >= 0 && ctx->init_qp_b >= 0) {
672 rc->constQP.qpIntra = ctx->init_qp_i;
673 rc->constQP.qpInterB = ctx->init_qp_b;
674 } else if (avctx->i_quant_factor != 0.0 && avctx->b_quant_factor != 0.0) {
675 rc->constQP.qpIntra = av_clip(
676 rc->constQP.qpInterP * fabs(avctx->i_quant_factor) + avctx->i_quant_offset + 0.5, 0, 51);
677 rc->constQP.qpInterB = av_clip(
678 rc->constQP.qpInterP * fabs(avctx->b_quant_factor) + avctx->b_quant_offset + 0.5, 0, 51);
679 } else {
680 rc->constQP.qpIntra = rc->constQP.qpInterP;
681 rc->constQP.qpInterB = rc->constQP.qpInterP;
682 }
683 } else if (ctx->cqp >= 0) {
684 rc->constQP.qpInterP = rc->constQP.qpInterB = rc->constQP.qpIntra = ctx->cqp;
685 if (avctx->b_quant_factor != 0.0)
686 rc->constQP.qpInterB = av_clip(ctx->cqp * fabs(avctx->b_quant_factor) + avctx->b_quant_offset + 0.5, 0, 51);
687 if (avctx->i_quant_factor != 0.0)
688 rc->constQP.qpIntra = av_clip(ctx->cqp * fabs(avctx->i_quant_factor) + avctx->i_quant_offset + 0.5, 0, 51);
689 }
690
691 avctx->qmin = -1;
692 avctx->qmax = -1;
693 }
694
static av_cold void set_vbr(AVCodecContext *avctx)
696 {
697 NvencContext *ctx = avctx->priv_data;
698 NV_ENC_RC_PARAMS *rc = &ctx->encode_config.rcParams;
699 int qp_inter_p;
700
701 if (avctx->qmin >= 0 && avctx->qmax >= 0) {
702 rc->enableMinQP = 1;
703 rc->enableMaxQP = 1;
704
705 rc->minQP.qpInterB = avctx->qmin;
706 rc->minQP.qpInterP = avctx->qmin;
707 rc->minQP.qpIntra = avctx->qmin;
708
709 rc->maxQP.qpInterB = avctx->qmax;
710 rc->maxQP.qpInterP = avctx->qmax;
711 rc->maxQP.qpIntra = avctx->qmax;
712
713 qp_inter_p = (avctx->qmax + 3 * avctx->qmin) / 4; // biased towards Qmin
714 } else if (avctx->qmin >= 0) {
715 rc->enableMinQP = 1;
716
717 rc->minQP.qpInterB = avctx->qmin;
718 rc->minQP.qpInterP = avctx->qmin;
719 rc->minQP.qpIntra = avctx->qmin;
720
721 qp_inter_p = avctx->qmin;
722 } else {
723 qp_inter_p = 26; // default to 26
724 }
725
726 rc->enableInitialRCQP = 1;
727
728 if (ctx->init_qp_p < 0) {
729 rc->initialRCQP.qpInterP = qp_inter_p;
730 } else {
731 rc->initialRCQP.qpInterP = ctx->init_qp_p;
732 }
733
734 if (ctx->init_qp_i < 0) {
735 if (avctx->i_quant_factor != 0.0 && avctx->b_quant_factor != 0.0) {
736 rc->initialRCQP.qpIntra = av_clip(
737 rc->initialRCQP.qpInterP * fabs(avctx->i_quant_factor) + avctx->i_quant_offset + 0.5, 0, 51);
738 } else {
739 rc->initialRCQP.qpIntra = rc->initialRCQP.qpInterP;
740 }
741 } else {
742 rc->initialRCQP.qpIntra = ctx->init_qp_i;
743 }
744
745 if (ctx->init_qp_b < 0) {
746 if (avctx->i_quant_factor != 0.0 && avctx->b_quant_factor != 0.0) {
747 rc->initialRCQP.qpInterB = av_clip(
748 rc->initialRCQP.qpInterP * fabs(avctx->b_quant_factor) + avctx->b_quant_offset + 0.5, 0, 51);
749 } else {
750 rc->initialRCQP.qpInterB = rc->initialRCQP.qpInterP;
751 }
752 } else {
753 rc->initialRCQP.qpInterB = ctx->init_qp_b;
754 }
755 }
756
static av_cold void set_lossless(AVCodecContext *avctx)
758 {
759 NvencContext *ctx = avctx->priv_data;
760 NV_ENC_RC_PARAMS *rc = &ctx->encode_config.rcParams;
761
762 rc->rateControlMode = NV_ENC_PARAMS_RC_CONSTQP;
763 rc->constQP.qpInterB = 0;
764 rc->constQP.qpInterP = 0;
765 rc->constQP.qpIntra = 0;
766
767 avctx->qmin = -1;
768 avctx->qmax = -1;
769 }
770
static void nvenc_override_rate_control(AVCodecContext *avctx)
772 {
773 NvencContext *ctx = avctx->priv_data;
774 NV_ENC_RC_PARAMS *rc = &ctx->encode_config.rcParams;
775
776 switch (ctx->rc) {
777 case NV_ENC_PARAMS_RC_CONSTQP:
778 set_constqp(avctx);
779 return;
780 case NV_ENC_PARAMS_RC_VBR_MINQP:
781 if (avctx->qmin < 0) {
782 av_log(avctx, AV_LOG_WARNING,
783 "The variable bitrate rate-control requires "
784 "the 'qmin' option set.\n");
785 set_vbr(avctx);
786 return;
787 }
788 /* fall through */
789 case NV_ENC_PARAMS_RC_VBR_HQ:
790 case NV_ENC_PARAMS_RC_VBR:
791 set_vbr(avctx);
792 break;
793 case NV_ENC_PARAMS_RC_CBR:
794 case NV_ENC_PARAMS_RC_CBR_HQ:
795 case NV_ENC_PARAMS_RC_CBR_LOWDELAY_HQ:
796 break;
797 }
798
799 rc->rateControlMode = ctx->rc;
800 }
801
static av_cold int nvenc_recalc_surfaces(AVCodecContext *avctx)
803 {
804 NvencContext *ctx = avctx->priv_data;
805 // default minimum of 4 surfaces
806 // multiply by 2 for number of NVENCs on gpu (hardcode to 2)
807 // another multiply by 2 to avoid blocking next PBB group
808 int nb_surfaces = FFMAX(4, ctx->encode_config.frameIntervalP * 2 * 2);
809
810 // lookahead enabled
811 if (ctx->rc_lookahead > 0) {
812 // +1 is to account for lkd_bound calculation later
813 // +4 is to allow sufficient pipelining with lookahead
814 nb_surfaces = FFMAX(1, FFMAX(nb_surfaces, ctx->rc_lookahead + ctx->encode_config.frameIntervalP + 1 + 4));
815 if (nb_surfaces > ctx->nb_surfaces && ctx->nb_surfaces > 0)
816 {
817 av_log(avctx, AV_LOG_WARNING,
818 "Defined rc_lookahead requires more surfaces, "
819 "increasing used surfaces %d -> %d\n", ctx->nb_surfaces, nb_surfaces);
820 }
821 ctx->nb_surfaces = FFMAX(nb_surfaces, ctx->nb_surfaces);
822 } else {
823 if (ctx->encode_config.frameIntervalP > 1 && ctx->nb_surfaces < nb_surfaces && ctx->nb_surfaces > 0)
824 {
825 av_log(avctx, AV_LOG_WARNING,
826 "Defined b-frame requires more surfaces, "
827 "increasing used surfaces %d -> %d\n", ctx->nb_surfaces, nb_surfaces);
828 ctx->nb_surfaces = FFMAX(ctx->nb_surfaces, nb_surfaces);
829 }
830 else if (ctx->nb_surfaces <= 0)
831 ctx->nb_surfaces = nb_surfaces;
832 // otherwise use user specified value
833 }
834
835 ctx->nb_surfaces = FFMAX(1, FFMIN(MAX_REGISTERED_FRAMES, ctx->nb_surfaces));
836 ctx->async_depth = FFMIN(ctx->async_depth, ctx->nb_surfaces - 1);
837
838 return 0;
839 }
840
static av_cold void nvenc_setup_rate_control(AVCodecContext *avctx)
842 {
843 NvencContext *ctx = avctx->priv_data;
844
845 if (avctx->global_quality > 0)
846 av_log(avctx, AV_LOG_WARNING, "Using global_quality with nvenc is deprecated. Use qp instead.\n");
847
848 if (ctx->cqp < 0 && avctx->global_quality > 0)
849 ctx->cqp = avctx->global_quality;
850
851 if (avctx->bit_rate > 0) {
852 ctx->encode_config.rcParams.averageBitRate = avctx->bit_rate;
853 } else if (ctx->encode_config.rcParams.averageBitRate > 0) {
854 ctx->encode_config.rcParams.maxBitRate = ctx->encode_config.rcParams.averageBitRate;
855 }
856
857 if (avctx->rc_max_rate > 0)
858 ctx->encode_config.rcParams.maxBitRate = avctx->rc_max_rate;
859
860 if (ctx->rc < 0) {
861 if (ctx->flags & NVENC_ONE_PASS)
862 ctx->twopass = 0;
863 if (ctx->flags & NVENC_TWO_PASSES)
864 ctx->twopass = 1;
865
866 if (ctx->twopass < 0)
867 ctx->twopass = (ctx->flags & NVENC_LOWLATENCY) != 0;
868
869 if (ctx->cbr) {
870 if (ctx->twopass) {
871 ctx->rc = NV_ENC_PARAMS_RC_CBR_LOWDELAY_HQ;
872 } else {
873 ctx->rc = NV_ENC_PARAMS_RC_CBR;
874 }
875 } else if (ctx->cqp >= 0) {
876 ctx->rc = NV_ENC_PARAMS_RC_CONSTQP;
877 } else if (ctx->twopass) {
878 ctx->rc = NV_ENC_PARAMS_RC_VBR_HQ;
879 } else if (avctx->qmin >= 0 && avctx->qmax >= 0) {
880 ctx->rc = NV_ENC_PARAMS_RC_VBR_MINQP;
881 }
882 }
883
884 if (ctx->rc >= 0 && ctx->rc & RC_MODE_DEPRECATED) {
885 av_log(avctx, AV_LOG_WARNING, "Specified rc mode is deprecated.\n");
886 av_log(avctx, AV_LOG_WARNING, "\tll_2pass_quality -> cbr_ld_hq\n");
887 av_log(avctx, AV_LOG_WARNING, "\tll_2pass_size -> cbr_hq\n");
888 av_log(avctx, AV_LOG_WARNING, "\tvbr_2pass -> vbr_hq\n");
889 av_log(avctx, AV_LOG_WARNING, "\tvbr_minqp -> (no replacement)\n");
890
891 ctx->rc &= ~RC_MODE_DEPRECATED;
892 }
893
894 if (ctx->flags & NVENC_LOSSLESS) {
895 set_lossless(avctx);
896 } else if (ctx->rc >= 0) {
897 nvenc_override_rate_control(avctx);
898 } else {
899 ctx->encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_VBR;
900 set_vbr(avctx);
901 }
902
903 if (avctx->rc_buffer_size > 0) {
904 ctx->encode_config.rcParams.vbvBufferSize = avctx->rc_buffer_size;
905 } else if (ctx->encode_config.rcParams.averageBitRate > 0) {
906 avctx->rc_buffer_size = ctx->encode_config.rcParams.vbvBufferSize = 2 * ctx->encode_config.rcParams.averageBitRate;
907 }
908
909 if (ctx->aq) {
910 ctx->encode_config.rcParams.enableAQ = 1;
911 ctx->encode_config.rcParams.aqStrength = ctx->aq_strength;
912 av_log(avctx, AV_LOG_VERBOSE, "AQ enabled.\n");
913 }
914
915 if (ctx->temporal_aq) {
916 ctx->encode_config.rcParams.enableTemporalAQ = 1;
917 av_log(avctx, AV_LOG_VERBOSE, "Temporal AQ enabled.\n");
918 }
919
920 if (ctx->rc_lookahead > 0) {
921 int lkd_bound = FFMIN(ctx->nb_surfaces, ctx->async_depth) -
922 ctx->encode_config.frameIntervalP - 4;
923
924 if (lkd_bound < 0) {
925 av_log(avctx, AV_LOG_WARNING,
926 "Lookahead not enabled. Increase buffer delay (-delay).\n");
927 } else {
928 ctx->encode_config.rcParams.enableLookahead = 1;
929 ctx->encode_config.rcParams.lookaheadDepth = av_clip(ctx->rc_lookahead, 0, lkd_bound);
930 ctx->encode_config.rcParams.disableIadapt = ctx->no_scenecut;
931 ctx->encode_config.rcParams.disableBadapt = !ctx->b_adapt;
932 av_log(avctx, AV_LOG_VERBOSE,
933 "Lookahead enabled: depth %d, scenecut %s, B-adapt %s.\n",
934 ctx->encode_config.rcParams.lookaheadDepth,
935 ctx->encode_config.rcParams.disableIadapt ? "disabled" : "enabled",
936 ctx->encode_config.rcParams.disableBadapt ? "disabled" : "enabled");
937 }
938 }
939
940 if (ctx->strict_gop) {
941 ctx->encode_config.rcParams.strictGOPTarget = 1;
942 av_log(avctx, AV_LOG_VERBOSE, "Strict GOP target enabled.\n");
943 }
944
945 if (ctx->nonref_p)
946 ctx->encode_config.rcParams.enableNonRefP = 1;
947
948 if (ctx->zerolatency)
949 ctx->encode_config.rcParams.zeroReorderDelay = 1;
950
951 if (ctx->quality) {
952 //convert from float to fixed point 8.8
953 int tmp_quality = (int)(ctx->quality * 256.0f);
954 ctx->encode_config.rcParams.targetQuality = (uint8_t)(tmp_quality >> 8);
955 ctx->encode_config.rcParams.targetQualityLSB = (uint8_t)(tmp_quality & 0xff);
956
957 av_log(avctx, AV_LOG_VERBOSE, "CQ(%d) mode enabled.\n", tmp_quality);
958
959 //CQ mode shall discard avg bitrate & honor max bitrate;
960 ctx->encode_config.rcParams.averageBitRate = avctx->bit_rate = 0;
961 ctx->encode_config.rcParams.maxBitRate = avctx->rc_max_rate;
962 }
963 }
964
static av_cold int nvenc_setup_h264_config(AVCodecContext *avctx)
966 {
967 NvencContext *ctx = avctx->priv_data;
968 NV_ENC_CONFIG *cc = &ctx->encode_config;
969 NV_ENC_CONFIG_H264 *h264 = &cc->encodeCodecConfig.h264Config;
970 NV_ENC_CONFIG_H264_VUI_PARAMETERS *vui = &h264->h264VUIParameters;
971
972 vui->colourMatrix = avctx->colorspace;
973 vui->colourPrimaries = avctx->color_primaries;
974 vui->transferCharacteristics = avctx->color_trc;
975 vui->videoFullRangeFlag = (avctx->color_range == AVCOL_RANGE_JPEG
976 || ctx->data_pix_fmt == AV_PIX_FMT_YUVJ420P || ctx->data_pix_fmt == AV_PIX_FMT_YUVJ422P || ctx->data_pix_fmt == AV_PIX_FMT_YUVJ444P);
977
978 vui->colourDescriptionPresentFlag =
979 (avctx->colorspace != 2 || avctx->color_primaries != 2 || avctx->color_trc != 2);
980
981 vui->videoSignalTypePresentFlag =
982 (vui->colourDescriptionPresentFlag
983 || vui->videoFormat != 5
984 || vui->videoFullRangeFlag != 0);
985
986 h264->sliceMode = 3;
987 h264->sliceModeData = 1;
988
989 h264->disableSPSPPS = (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) ? 1 : 0;
990 h264->repeatSPSPPS = (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) ? 0 : 1;
991 h264->outputAUD = ctx->aud;
992
993 if (ctx->dpb_size >= 0) {
994 /* 0 means "let the hardware decide" */
995 h264->maxNumRefFrames = ctx->dpb_size;
996 }
997 if (avctx->gop_size >= 0) {
998 h264->idrPeriod = cc->gopLength;
999 }
1000
1001 if (IS_CBR(cc->rcParams.rateControlMode)) {
1002 h264->outputBufferingPeriodSEI = 1;
1003 }
1004
1005 h264->outputPictureTimingSEI = 1;
1006
1007 if (cc->rcParams.rateControlMode == NV_ENC_PARAMS_RC_CBR_LOWDELAY_HQ ||
1008 cc->rcParams.rateControlMode == NV_ENC_PARAMS_RC_CBR_HQ ||
1009 cc->rcParams.rateControlMode == NV_ENC_PARAMS_RC_VBR_HQ) {
1010 h264->adaptiveTransformMode = NV_ENC_H264_ADAPTIVE_TRANSFORM_ENABLE;
1011 h264->fmoMode = NV_ENC_H264_FMO_DISABLE;
1012 }
1013
1014 if (ctx->flags & NVENC_LOSSLESS) {
1015 h264->qpPrimeYZeroTransformBypassFlag = 1;
1016 } else {
1017 switch(ctx->profile) {
1018 case NV_ENC_H264_PROFILE_BASELINE:
1019 cc->profileGUID = NV_ENC_H264_PROFILE_BASELINE_GUID;
1020 avctx->profile = FF_PROFILE_H264_BASELINE;
1021 break;
1022 case NV_ENC_H264_PROFILE_MAIN:
1023 cc->profileGUID = NV_ENC_H264_PROFILE_MAIN_GUID;
1024 avctx->profile = FF_PROFILE_H264_MAIN;
1025 break;
1026 case NV_ENC_H264_PROFILE_HIGH:
1027 cc->profileGUID = NV_ENC_H264_PROFILE_HIGH_GUID;
1028 avctx->profile = FF_PROFILE_H264_HIGH;
1029 break;
1030 case NV_ENC_H264_PROFILE_HIGH_444P:
1031 cc->profileGUID = NV_ENC_H264_PROFILE_HIGH_444_GUID;
1032 avctx->profile = FF_PROFILE_H264_HIGH_444_PREDICTIVE;
1033 break;
1034 }
1035 }
1036
1037 // force setting profile as high444p if input is AV_PIX_FMT_YUV444P
1038 if (ctx->data_pix_fmt == AV_PIX_FMT_YUV444P) {
1039 cc->profileGUID = NV_ENC_H264_PROFILE_HIGH_444_GUID;
1040 avctx->profile = FF_PROFILE_H264_HIGH_444_PREDICTIVE;
1041 }
1042
1043 h264->chromaFormatIDC = avctx->profile == FF_PROFILE_H264_HIGH_444_PREDICTIVE ? 3 : 1;
1044
1045 h264->level = ctx->level;
1046
1047 if (ctx->coder >= 0)
1048 h264->entropyCodingMode = ctx->coder;
1049
1050 #ifdef NVENC_HAVE_BFRAME_REF_MODE
1051 h264->useBFramesAsRef = ctx->b_ref_mode;
1052 #endif
1053
1054 #ifdef NVENC_HAVE_MULTIPLE_REF_FRAMES
1055 h264->numRefL0 = avctx->refs;
1056 h264->numRefL1 = avctx->refs;
1057 #endif
1058
1059 return 0;
1060 }
1061
static av_cold int nvenc_setup_hevc_config(AVCodecContext *avctx)
1063 {
1064 NvencContext *ctx = avctx->priv_data;
1065 NV_ENC_CONFIG *cc = &ctx->encode_config;
1066 NV_ENC_CONFIG_HEVC *hevc = &cc->encodeCodecConfig.hevcConfig;
1067 NV_ENC_CONFIG_HEVC_VUI_PARAMETERS *vui = &hevc->hevcVUIParameters;
1068
1069 vui->colourMatrix = avctx->colorspace;
1070 vui->colourPrimaries = avctx->color_primaries;
1071 vui->transferCharacteristics = avctx->color_trc;
1072 vui->videoFullRangeFlag = (avctx->color_range == AVCOL_RANGE_JPEG
1073 || ctx->data_pix_fmt == AV_PIX_FMT_YUVJ420P || ctx->data_pix_fmt == AV_PIX_FMT_YUVJ422P || ctx->data_pix_fmt == AV_PIX_FMT_YUVJ444P);
1074
1075 vui->colourDescriptionPresentFlag =
1076 (avctx->colorspace != 2 || avctx->color_primaries != 2 || avctx->color_trc != 2);
1077
1078 vui->videoSignalTypePresentFlag =
1079 (vui->colourDescriptionPresentFlag
1080 || vui->videoFormat != 5
1081 || vui->videoFullRangeFlag != 0);
1082
1083 hevc->sliceMode = 3;
1084 hevc->sliceModeData = 1;
1085
1086 hevc->disableSPSPPS = (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) ? 1 : 0;
1087 hevc->repeatSPSPPS = (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) ? 0 : 1;
1088 hevc->outputAUD = ctx->aud;
1089
1090 if (ctx->dpb_size >= 0) {
1091 /* 0 means "let the hardware decide" */
1092 hevc->maxNumRefFramesInDPB = ctx->dpb_size;
1093 }
1094 if (avctx->gop_size >= 0) {
1095 hevc->idrPeriod = cc->gopLength;
1096 }
1097
1098 if (IS_CBR(cc->rcParams.rateControlMode)) {
1099 hevc->outputBufferingPeriodSEI = 1;
1100 }
1101
1102 hevc->outputPictureTimingSEI = 1;
1103
1104 switch (ctx->profile) {
1105 case NV_ENC_HEVC_PROFILE_MAIN:
1106 cc->profileGUID = NV_ENC_HEVC_PROFILE_MAIN_GUID;
1107 avctx->profile = FF_PROFILE_HEVC_MAIN;
1108 break;
1109 case NV_ENC_HEVC_PROFILE_MAIN_10:
1110 cc->profileGUID = NV_ENC_HEVC_PROFILE_MAIN10_GUID;
1111 avctx->profile = FF_PROFILE_HEVC_MAIN_10;
1112 break;
1113 case NV_ENC_HEVC_PROFILE_REXT:
1114 cc->profileGUID = NV_ENC_HEVC_PROFILE_FREXT_GUID;
1115 avctx->profile = FF_PROFILE_HEVC_REXT;
1116 break;
1117 }
1118
1119 // force setting profile as main10 if input is 10 bit
1120 if (IS_10BIT(ctx->data_pix_fmt)) {
1121 cc->profileGUID = NV_ENC_HEVC_PROFILE_MAIN10_GUID;
1122 avctx->profile = FF_PROFILE_HEVC_MAIN_10;
1123 }
1124
1125 // force setting profile as rext if input is yuv444
1126 if (IS_YUV444(ctx->data_pix_fmt)) {
1127 cc->profileGUID = NV_ENC_HEVC_PROFILE_FREXT_GUID;
1128 avctx->profile = FF_PROFILE_HEVC_REXT;
1129 }
1130
1131 hevc->chromaFormatIDC = IS_YUV444(ctx->data_pix_fmt) ? 3 : 1;
1132
1133 hevc->pixelBitDepthMinus8 = IS_10BIT(ctx->data_pix_fmt) ? 2 : 0;
1134
1135 hevc->level = ctx->level;
1136
1137 hevc->tier = ctx->tier;
1138
1139 #ifdef NVENC_HAVE_HEVC_BFRAME_REF_MODE
1140 hevc->useBFramesAsRef = ctx->b_ref_mode;
1141 #endif
1142
1143 #ifdef NVENC_HAVE_MULTIPLE_REF_FRAMES
1144 hevc->numRefL0 = avctx->refs;
1145 hevc->numRefL1 = avctx->refs;
1146 #endif
1147
1148 return 0;
1149 }
1150
static av_cold int nvenc_setup_codec_config(AVCodecContext *avctx)
1152 {
1153 switch (avctx->codec->id) {
1154 case AV_CODEC_ID_H264:
1155 return nvenc_setup_h264_config(avctx);
1156 case AV_CODEC_ID_HEVC:
1157 return nvenc_setup_hevc_config(avctx);
1158 /* Earlier switch/case will return if unknown codec is passed. */
1159 }
1160
1161 return 0;
1162 }
1163
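/* Derive the display aspect ratio from the coded dimensions and the sample
 * aspect ratio, reduced to fit within a 1024*1024 bound. */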
static void compute_dar(AVCodecContext *avctx, int *dw, int *dh) {
1165 int sw, sh;
1166
1167 sw = avctx->width;
1168 sh = avctx->height;
1169
1170 if (avctx->sample_aspect_ratio.num > 0 && avctx->sample_aspect_ratio.den > 0) {
1171 sw *= avctx->sample_aspect_ratio.num;
1172 sh *= avctx->sample_aspect_ratio.den;
1173 }
1174
1175 av_reduce(dw, dh, sw, sh, 1024 * 1024);
1176 }
1177
static av_cold int nvenc_setup_encoder(AVCodecContext *avctx)
1179 {
1180 NvencContext *ctx = avctx->priv_data;
1181 NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
1182 NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &dl_fn->nvenc_funcs;
1183
1184 NV_ENC_PRESET_CONFIG preset_config = { 0 };
1185 NVENCSTATUS nv_status = NV_ENC_SUCCESS;
1186 AVCPBProperties *cpb_props;
1187 int res = 0;
1188 int dw, dh;
1189
1190 ctx->encode_config.version = NV_ENC_CONFIG_VER;
1191 ctx->init_encode_params.version = NV_ENC_INITIALIZE_PARAMS_VER;
1192
1193 ctx->init_encode_params.encodeHeight = avctx->height;
1194 ctx->init_encode_params.encodeWidth = avctx->width;
1195
1196 ctx->init_encode_params.encodeConfig = &ctx->encode_config;
1197
1198 nvenc_map_preset(ctx);
1199
1200 preset_config.version = NV_ENC_PRESET_CONFIG_VER;
1201 preset_config.presetCfg.version = NV_ENC_CONFIG_VER;
1202
1203 nv_status = p_nvenc->nvEncGetEncodePresetConfig(ctx->nvencoder,
1204 ctx->init_encode_params.encodeGUID,
1205 ctx->init_encode_params.presetGUID,
1206 &preset_config);
1207 if (nv_status != NV_ENC_SUCCESS)
1208 return nvenc_print_error(avctx, nv_status, "Cannot get the preset configuration");
1209
1210 memcpy(&ctx->encode_config, &preset_config.presetCfg, sizeof(ctx->encode_config));
1211
1212 ctx->encode_config.version = NV_ENC_CONFIG_VER;
1213
1214 compute_dar(avctx, &dw, &dh);
1215 ctx->init_encode_params.darHeight = dh;
1216 ctx->init_encode_params.darWidth = dw;
1217
1218 if (avctx->framerate.num > 0 && avctx->framerate.den > 0) {
1219 ctx->init_encode_params.frameRateNum = avctx->framerate.num;
1220 ctx->init_encode_params.frameRateDen = avctx->framerate.den;
1221 } else {
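        /* No explicit frame rate set: fall back to the inverse of the time base,
         * scaled by ticks_per_frame. */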
1222 ctx->init_encode_params.frameRateNum = avctx->time_base.den;
1223 ctx->init_encode_params.frameRateDen = avctx->time_base.num * avctx->ticks_per_frame;
1224 }
1225
1226 ctx->init_encode_params.enableEncodeAsync = 0;
1227 ctx->init_encode_params.enablePTD = 1;
1228
1229 if (ctx->weighted_pred == 1)
1230 ctx->init_encode_params.enableWeightedPrediction = 1;
1231
1232 if (ctx->bluray_compat) {
1233 ctx->aud = 1;
1234 ctx->dpb_size = FFMIN(FFMAX(avctx->refs, 0), 6);
1235 avctx->max_b_frames = FFMIN(avctx->max_b_frames, 3);
1236 switch (avctx->codec->id) {
1237 case AV_CODEC_ID_H264:
1238 /* maximum level depends on used resolution */
1239 break;
1240 case AV_CODEC_ID_HEVC:
1241 ctx->level = NV_ENC_LEVEL_HEVC_51;
1242 ctx->tier = NV_ENC_TIER_HEVC_HIGH;
1243 break;
1244 }
1245 }
1246
1247 if (avctx->gop_size > 0) {
1248 if (avctx->max_b_frames >= 0) {
1249 /* 0 is intra-only, 1 is I/P only, 2 is one B-Frame, 3 two B-frames, and so on. */
1250 ctx->encode_config.frameIntervalP = avctx->max_b_frames + 1;
1251 }
1252
1253 ctx->encode_config.gopLength = avctx->gop_size;
1254 } else if (avctx->gop_size == 0) {
1255 ctx->encode_config.frameIntervalP = 0;
1256 ctx->encode_config.gopLength = 1;
1257 }
1258
1259 nvenc_recalc_surfaces(avctx);
1260
1261 nvenc_setup_rate_control(avctx);
1262
1263 if (avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
1264 ctx->encode_config.frameFieldMode = NV_ENC_PARAMS_FRAME_FIELD_MODE_FIELD;
1265 } else {
1266 ctx->encode_config.frameFieldMode = NV_ENC_PARAMS_FRAME_FIELD_MODE_FRAME;
1267 }
1268
1269 res = nvenc_setup_codec_config(avctx);
1270 if (res)
1271 return res;
1272
1273 res = nvenc_push_context(avctx);
1274 if (res < 0)
1275 return res;
1276
1277 nv_status = p_nvenc->nvEncInitializeEncoder(ctx->nvencoder, &ctx->init_encode_params);
1278 if (nv_status != NV_ENC_SUCCESS) {
1279 nvenc_pop_context(avctx);
1280 return nvenc_print_error(avctx, nv_status, "InitializeEncoder failed");
1281 }
1282
1283 #ifdef NVENC_HAVE_CUSTREAM_PTR
1284 if (ctx->cu_context) {
1285 nv_status = p_nvenc->nvEncSetIOCudaStreams(ctx->nvencoder, &ctx->cu_stream, &ctx->cu_stream);
1286 if (nv_status != NV_ENC_SUCCESS) {
1287 nvenc_pop_context(avctx);
1288 return nvenc_print_error(avctx, nv_status, "SetIOCudaStreams failed");
1289 }
1290 }
1291 #endif
1292
1293 res = nvenc_pop_context(avctx);
1294 if (res < 0)
1295 return res;
1296
1297 if (ctx->encode_config.frameIntervalP > 1)
1298 avctx->has_b_frames = 2;
1299
1300 if (ctx->encode_config.rcParams.averageBitRate > 0)
1301 avctx->bit_rate = ctx->encode_config.rcParams.averageBitRate;
1302
1303 cpb_props = ff_add_cpb_side_data(avctx);
1304 if (!cpb_props)
1305 return AVERROR(ENOMEM);
1306 cpb_props->max_bitrate = ctx->encode_config.rcParams.maxBitRate;
1307 cpb_props->avg_bitrate = avctx->bit_rate;
1308 cpb_props->buffer_size = ctx->encode_config.rcParams.vbvBufferSize;
1309
1310 return 0;
1311 }
1312
static NV_ENC_BUFFER_FORMAT nvenc_map_buffer_format(enum AVPixelFormat pix_fmt)
1314 {
1315 switch (pix_fmt) {
1316 case AV_PIX_FMT_YUV420P:
1317 return NV_ENC_BUFFER_FORMAT_YV12_PL;
1318 case AV_PIX_FMT_NV12:
1319 return NV_ENC_BUFFER_FORMAT_NV12_PL;
1320 case AV_PIX_FMT_P010:
1321 case AV_PIX_FMT_P016:
1322 return NV_ENC_BUFFER_FORMAT_YUV420_10BIT;
1323 case AV_PIX_FMT_YUV444P:
1324 return NV_ENC_BUFFER_FORMAT_YUV444_PL;
1325 case AV_PIX_FMT_YUV444P16:
1326 return NV_ENC_BUFFER_FORMAT_YUV444_10BIT;
1327 case AV_PIX_FMT_0RGB32:
1328 return NV_ENC_BUFFER_FORMAT_ARGB;
1329 case AV_PIX_FMT_0BGR32:
1330 return NV_ENC_BUFFER_FORMAT_ABGR;
1331 default:
1332 return NV_ENC_BUFFER_FORMAT_UNDEFINED;
1333 }
1334 }
1335
static av_cold int nvenc_alloc_surface(AVCodecContext *avctx, int idx)
1337 {
1338 NvencContext *ctx = avctx->priv_data;
1339 NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
1340 NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &dl_fn->nvenc_funcs;
1341 NvencSurface* tmp_surface = &ctx->surfaces[idx];
1342
1343 NVENCSTATUS nv_status;
1344 NV_ENC_CREATE_BITSTREAM_BUFFER allocOut = { 0 };
1345 allocOut.version = NV_ENC_CREATE_BITSTREAM_BUFFER_VER;
1346
1347 if (avctx->pix_fmt == AV_PIX_FMT_CUDA || avctx->pix_fmt == AV_PIX_FMT_D3D11) {
1348 ctx->surfaces[idx].in_ref = av_frame_alloc();
1349 if (!ctx->surfaces[idx].in_ref)
1350 return AVERROR(ENOMEM);
1351 } else {
1352 NV_ENC_CREATE_INPUT_BUFFER allocSurf = { 0 };
1353
1354 ctx->surfaces[idx].format = nvenc_map_buffer_format(ctx->data_pix_fmt);
1355 if (ctx->surfaces[idx].format == NV_ENC_BUFFER_FORMAT_UNDEFINED) {
1356 av_log(avctx, AV_LOG_FATAL, "Invalid input pixel format: %s\n",
1357 av_get_pix_fmt_name(ctx->data_pix_fmt));
1358 return AVERROR(EINVAL);
1359 }
1360
1361 allocSurf.version = NV_ENC_CREATE_INPUT_BUFFER_VER;
1362 allocSurf.width = avctx->width;
1363 allocSurf.height = avctx->height;
1364 allocSurf.bufferFmt = ctx->surfaces[idx].format;
1365
1366 nv_status = p_nvenc->nvEncCreateInputBuffer(ctx->nvencoder, &allocSurf);
1367 if (nv_status != NV_ENC_SUCCESS) {
1368 return nvenc_print_error(avctx, nv_status, "CreateInputBuffer failed");
1369 }
1370
1371 ctx->surfaces[idx].input_surface = allocSurf.inputBuffer;
1372 ctx->surfaces[idx].width = allocSurf.width;
1373 ctx->surfaces[idx].height = allocSurf.height;
1374 }
1375
1376 nv_status = p_nvenc->nvEncCreateBitstreamBuffer(ctx->nvencoder, &allocOut);
1377 if (nv_status != NV_ENC_SUCCESS) {
1378 int err = nvenc_print_error(avctx, nv_status, "CreateBitstreamBuffer failed");
1379 if (avctx->pix_fmt != AV_PIX_FMT_CUDA && avctx->pix_fmt != AV_PIX_FMT_D3D11)
1380 p_nvenc->nvEncDestroyInputBuffer(ctx->nvencoder, ctx->surfaces[idx].input_surface);
1381 av_frame_free(&ctx->surfaces[idx].in_ref);
1382 return err;
1383 }
1384
1385 ctx->surfaces[idx].output_surface = allocOut.bitstreamBuffer;
1386 ctx->surfaces[idx].size = allocOut.size;
1387
1388 av_fifo_generic_write(ctx->unused_surface_queue, &tmp_surface, sizeof(tmp_surface), NULL);
1389
1390 return 0;
1391 }
1392
static av_cold int nvenc_setup_surfaces(AVCodecContext *avctx)
1394 {
1395 NvencContext *ctx = avctx->priv_data;
1396 int i, res = 0, res2;
1397
1398 ctx->surfaces = av_mallocz_array(ctx->nb_surfaces, sizeof(*ctx->surfaces));
1399 if (!ctx->surfaces)
1400 return AVERROR(ENOMEM);
1401
1402 ctx->timestamp_list = av_fifo_alloc(ctx->nb_surfaces * sizeof(int64_t));
1403 if (!ctx->timestamp_list)
1404 return AVERROR(ENOMEM);
1405
1406 ctx->unused_surface_queue = av_fifo_alloc(ctx->nb_surfaces * sizeof(NvencSurface*));
1407 if (!ctx->unused_surface_queue)
1408 return AVERROR(ENOMEM);
1409
1410 ctx->output_surface_queue = av_fifo_alloc(ctx->nb_surfaces * sizeof(NvencSurface*));
1411 if (!ctx->output_surface_queue)
1412 return AVERROR(ENOMEM);
1413 ctx->output_surface_ready_queue = av_fifo_alloc(ctx->nb_surfaces * sizeof(NvencSurface*));
1414 if (!ctx->output_surface_ready_queue)
1415 return AVERROR(ENOMEM);
1416
1417 res = nvenc_push_context(avctx);
1418 if (res < 0)
1419 return res;
1420
1421 for (i = 0; i < ctx->nb_surfaces; i++) {
1422 if ((res = nvenc_alloc_surface(avctx, i)) < 0)
1423 goto fail;
1424 }
1425
1426 fail:
1427 res2 = nvenc_pop_context(avctx);
1428 if (res2 < 0)
1429 return res2;
1430
1431 return res;
1432 }
1433
static av_cold int nvenc_setup_extradata(AVCodecContext *avctx)
1435 {
1436 NvencContext *ctx = avctx->priv_data;
1437 NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
1438 NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &dl_fn->nvenc_funcs;
1439
1440 NVENCSTATUS nv_status;
1441 uint32_t outSize = 0;
1442 char tmpHeader[256];
1443 NV_ENC_SEQUENCE_PARAM_PAYLOAD payload = { 0 };
1444 payload.version = NV_ENC_SEQUENCE_PARAM_PAYLOAD_VER;
1445
1446 payload.spsppsBuffer = tmpHeader;
1447 payload.inBufferSize = sizeof(tmpHeader);
1448 payload.outSPSPPSPayloadSize = &outSize;
1449
1450 nv_status = p_nvenc->nvEncGetSequenceParams(ctx->nvencoder, &payload);
1451 if (nv_status != NV_ENC_SUCCESS) {
1452 return nvenc_print_error(avctx, nv_status, "GetSequenceParams failed");
1453 }
1454
1455 avctx->extradata_size = outSize;
1456 avctx->extradata = av_mallocz(outSize + AV_INPUT_BUFFER_PADDING_SIZE);
1457
1458 if (!avctx->extradata) {
1459 return AVERROR(ENOMEM);
1460 }
1461
1462 memcpy(avctx->extradata, tmpHeader, outSize);
1463
1464 return 0;
1465 }
1466
av_cold int ff_nvenc_encode_close(AVCodecContext *avctx)
1468 {
1469 NvencContext *ctx = avctx->priv_data;
1470 NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
1471 NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &dl_fn->nvenc_funcs;
1472 int i, res;
1473
1474 /* the encoder has to be flushed before it can be closed */
1475 if (ctx->nvencoder) {
1476 NV_ENC_PIC_PARAMS params = { .version = NV_ENC_PIC_PARAMS_VER,
1477 .encodePicFlags = NV_ENC_PIC_FLAG_EOS };
1478
1479 res = nvenc_push_context(avctx);
1480 if (res < 0)
1481 return res;
1482
        p_nvenc->nvEncEncodePicture(ctx->nvencoder, &params);
1484 }
1485
1486 av_fifo_freep(&ctx->timestamp_list);
1487 av_fifo_freep(&ctx->output_surface_ready_queue);
1488 av_fifo_freep(&ctx->output_surface_queue);
1489 av_fifo_freep(&ctx->unused_surface_queue);
1490
1491 if (ctx->surfaces && (avctx->pix_fmt == AV_PIX_FMT_CUDA || avctx->pix_fmt == AV_PIX_FMT_D3D11)) {
1492 for (i = 0; i < ctx->nb_registered_frames; i++) {
1493 if (ctx->registered_frames[i].mapped)
1494 p_nvenc->nvEncUnmapInputResource(ctx->nvencoder, ctx->registered_frames[i].in_map.mappedResource);
1495 if (ctx->registered_frames[i].regptr)
1496 p_nvenc->nvEncUnregisterResource(ctx->nvencoder, ctx->registered_frames[i].regptr);
1497 }
1498 ctx->nb_registered_frames = 0;
1499 }
1500
1501 if (ctx->surfaces) {
1502 for (i = 0; i < ctx->nb_surfaces; ++i) {
1503 if (avctx->pix_fmt != AV_PIX_FMT_CUDA && avctx->pix_fmt != AV_PIX_FMT_D3D11)
1504 p_nvenc->nvEncDestroyInputBuffer(ctx->nvencoder, ctx->surfaces[i].input_surface);
1505 av_frame_free(&ctx->surfaces[i].in_ref);
1506 p_nvenc->nvEncDestroyBitstreamBuffer(ctx->nvencoder, ctx->surfaces[i].output_surface);
1507 }
1508 }
1509 av_freep(&ctx->surfaces);
1510 ctx->nb_surfaces = 0;
1511
1512 if (ctx->nvencoder) {
1513 p_nvenc->nvEncDestroyEncoder(ctx->nvencoder);
1514
1515 res = nvenc_pop_context(avctx);
1516 if (res < 0)
1517 return res;
1518 }
1519 ctx->nvencoder = NULL;
1520
1521 if (ctx->cu_context_internal)
1522 CHECK_CU(dl_fn->cuda_dl->cuCtxDestroy(ctx->cu_context_internal));
1523 ctx->cu_context = ctx->cu_context_internal = NULL;
1524
1525 #if CONFIG_D3D11VA
1526 if (ctx->d3d11_device) {
1527 ID3D11Device_Release(ctx->d3d11_device);
1528 ctx->d3d11_device = NULL;
1529 }
1530 #endif
1531
1532 nvenc_free_functions(&dl_fn->nvenc_dl);
1533 cuda_free_functions(&dl_fn->cuda_dl);
1534
1535 dl_fn->nvenc_device_count = 0;
1536
1537 av_log(avctx, AV_LOG_VERBOSE, "Nvenc unloaded\n");
1538
1539 return 0;
1540 }
1541
av_cold int ff_nvenc_encode_init(AVCodecContext *avctx)
1543 {
1544 NvencContext *ctx = avctx->priv_data;
1545 int ret;
1546
1547 if (avctx->pix_fmt == AV_PIX_FMT_CUDA || avctx->pix_fmt == AV_PIX_FMT_D3D11) {
1548 AVHWFramesContext *frames_ctx;
1549 if (!avctx->hw_frames_ctx) {
1550 av_log(avctx, AV_LOG_ERROR,
1551 "hw_frames_ctx must be set when using GPU frames as input\n");
1552 return AVERROR(EINVAL);
1553 }
1554 frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1555 if (frames_ctx->format != avctx->pix_fmt) {
1556 av_log(avctx, AV_LOG_ERROR,
1557 "hw_frames_ctx must match the GPU frame type\n");
1558 return AVERROR(EINVAL);
1559 }
1560 ctx->data_pix_fmt = frames_ctx->sw_format;
1561 } else {
1562 ctx->data_pix_fmt = avctx->pix_fmt;
1563 }
1564
1565 if ((ret = nvenc_load_libraries(avctx)) < 0)
1566 return ret;
1567
1568 if ((ret = nvenc_setup_device(avctx)) < 0)
1569 return ret;
1570
1571 if ((ret = nvenc_setup_encoder(avctx)) < 0)
1572 return ret;
1573
1574 if ((ret = nvenc_setup_surfaces(avctx)) < 0)
1575 return ret;
1576
1577 if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
1578 if ((ret = nvenc_setup_extradata(avctx)) < 0)
1579 return ret;
1580 }
1581
1582 return 0;
1583 }
1584
static NvencSurface *get_free_frame(NvencContext *ctx)
1586 {
1587 NvencSurface *tmp_surf;
1588
1589 if (!(av_fifo_size(ctx->unused_surface_queue) > 0))
1590 // queue empty
1591 return NULL;
1592
1593 av_fifo_generic_read(ctx->unused_surface_queue, &tmp_surf, sizeof(tmp_surf), NULL);
1594 return tmp_surf;
1595 }
1596
static int nvenc_copy_frame(AVCodecContext *avctx, NvencSurface *nv_surface,
                            NV_ENC_LOCK_INPUT_BUFFER *lock_buffer_params, const AVFrame *frame)
1599 {
1600 int dst_linesize[4] = {
1601 lock_buffer_params->pitch,
1602 lock_buffer_params->pitch,
1603 lock_buffer_params->pitch,
1604 lock_buffer_params->pitch
1605 };
1606 uint8_t *dst_data[4];
1607 int ret;
1608
1609 if (frame->format == AV_PIX_FMT_YUV420P)
1610 dst_linesize[1] = dst_linesize[2] >>= 1;
1611
1612 ret = av_image_fill_pointers(dst_data, frame->format, nv_surface->height,
1613 lock_buffer_params->bufferDataPtr, dst_linesize);
1614 if (ret < 0)
1615 return ret;
1616
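    /* NV_ENC_BUFFER_FORMAT_YV12_PL stores the V plane before the U plane,
     * so swap the chroma plane pointers for YUV420P input. */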
1617 if (frame->format == AV_PIX_FMT_YUV420P)
1618 FFSWAP(uint8_t*, dst_data[1], dst_data[2]);
1619
1620 av_image_copy(dst_data, dst_linesize,
1621 (const uint8_t**)frame->data, frame->linesize, frame->format,
1622 avctx->width, avctx->height);
1623
1624 return 0;
1625 }
1626
static int nvenc_find_free_reg_resource(AVCodecContext *avctx)
1628 {
1629 NvencContext *ctx = avctx->priv_data;
1630 NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
1631 NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &dl_fn->nvenc_funcs;
1632 NVENCSTATUS nv_status;
1633
1634 int i, first_round;
1635
1636 if (ctx->nb_registered_frames == FF_ARRAY_ELEMS(ctx->registered_frames)) {
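        /* All registration slots are taken: scan twice. The first pass prefers a
         * slot that is unmapped and already unregistered; the second pass
         * unregisters an unmapped slot so it can be reused. */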
1637 for (first_round = 1; first_round >= 0; first_round--) {
1638 for (i = 0; i < ctx->nb_registered_frames; i++) {
1639 if (!ctx->registered_frames[i].mapped) {
1640 if (ctx->registered_frames[i].regptr) {
1641 if (first_round)
1642 continue;
1643 nv_status = p_nvenc->nvEncUnregisterResource(ctx->nvencoder, ctx->registered_frames[i].regptr);
1644 if (nv_status != NV_ENC_SUCCESS)
1645 return nvenc_print_error(avctx, nv_status, "Failed unregistering unused input resource");
1646 ctx->registered_frames[i].ptr = NULL;
1647 ctx->registered_frames[i].regptr = NULL;
1648 }
1649 return i;
1650 }
1651 }
1652 }
1653 } else {
1654 return ctx->nb_registered_frames++;
1655 }
1656
1657 av_log(avctx, AV_LOG_ERROR, "Too many registered CUDA frames\n");
1658 return AVERROR(ENOMEM);
1659 }
1660
static int nvenc_register_frame(AVCodecContext *avctx, const AVFrame *frame)
1662 {
1663 NvencContext *ctx = avctx->priv_data;
1664 NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
1665 NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &dl_fn->nvenc_funcs;
1666
1667 AVHWFramesContext *frames_ctx = (AVHWFramesContext*)frame->hw_frames_ctx->data;
1668 NV_ENC_REGISTER_RESOURCE reg;
1669 int i, idx, ret;
1670
1671 for (i = 0; i < ctx->nb_registered_frames; i++) {
1672 if (avctx->pix_fmt == AV_PIX_FMT_CUDA && ctx->registered_frames[i].ptr == frame->data[0])
1673 return i;
1674 else if (avctx->pix_fmt == AV_PIX_FMT_D3D11 && ctx->registered_frames[i].ptr == frame->data[0] && ctx->registered_frames[i].ptr_index == (intptr_t)frame->data[1])
1675 return i;
1676 }
1677
1678 idx = nvenc_find_free_reg_resource(avctx);
1679 if (idx < 0)
1680 return idx;
1681
1682 reg.version = NV_ENC_REGISTER_RESOURCE_VER;
1683 reg.width = frames_ctx->width;
1684 reg.height = frames_ctx->height;
1685 reg.pitch = frame->linesize[0];
1686 reg.resourceToRegister = frame->data[0];
1687
1688 if (avctx->pix_fmt == AV_PIX_FMT_CUDA) {
1689 reg.resourceType = NV_ENC_INPUT_RESOURCE_TYPE_CUDADEVICEPTR;
1690 }
1691 else if (avctx->pix_fmt == AV_PIX_FMT_D3D11) {
1692 reg.resourceType = NV_ENC_INPUT_RESOURCE_TYPE_DIRECTX;
1693 reg.subResourceIndex = (intptr_t)frame->data[1];
1694 }
1695
1696 reg.bufferFormat = nvenc_map_buffer_format(frames_ctx->sw_format);
1697 if (reg.bufferFormat == NV_ENC_BUFFER_FORMAT_UNDEFINED) {
1698 av_log(avctx, AV_LOG_FATAL, "Invalid input pixel format: %s\n",
1699 av_get_pix_fmt_name(frames_ctx->sw_format));
1700 return AVERROR(EINVAL);
1701 }
1702
1703 ret = p_nvenc->nvEncRegisterResource(ctx->nvencoder, ®);
1704 if (ret != NV_ENC_SUCCESS) {
1705 nvenc_print_error(avctx, ret, "Error registering an input resource");
1706 return AVERROR_UNKNOWN;
1707 }
1708
1709 ctx->registered_frames[idx].ptr = frame->data[0];
1710 ctx->registered_frames[idx].ptr_index = reg.subResourceIndex;
1711 ctx->registered_frames[idx].regptr = reg.registeredResource;
1712 return idx;
1713 }
1714
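/* Make a frame available to the encoder: CUDA/D3D11 hardware frames are
 * registered and mapped in place, anything else is copied into a locked
 * NVENC input buffer. */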
static int nvenc_upload_frame(AVCodecContext *avctx, const AVFrame *frame,
                              NvencSurface *nvenc_frame)
{
    NvencContext *ctx = avctx->priv_data;
    NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
    NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &dl_fn->nvenc_funcs;

    int res;
    NVENCSTATUS nv_status;

    if (avctx->pix_fmt == AV_PIX_FMT_CUDA || avctx->pix_fmt == AV_PIX_FMT_D3D11) {
        int reg_idx = nvenc_register_frame(avctx, frame);
        if (reg_idx < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not register an input HW frame\n");
            return reg_idx;
        }

        res = av_frame_ref(nvenc_frame->in_ref, frame);
        if (res < 0)
            return res;

        if (!ctx->registered_frames[reg_idx].mapped) {
            ctx->registered_frames[reg_idx].in_map.version = NV_ENC_MAP_INPUT_RESOURCE_VER;
            ctx->registered_frames[reg_idx].in_map.registeredResource = ctx->registered_frames[reg_idx].regptr;
            nv_status = p_nvenc->nvEncMapInputResource(ctx->nvencoder, &ctx->registered_frames[reg_idx].in_map);
            if (nv_status != NV_ENC_SUCCESS) {
                av_frame_unref(nvenc_frame->in_ref);
                return nvenc_print_error(avctx, nv_status, "Error mapping an input resource");
            }
        }

        ctx->registered_frames[reg_idx].mapped += 1;

        nvenc_frame->reg_idx = reg_idx;
        nvenc_frame->input_surface = ctx->registered_frames[reg_idx].in_map.mappedResource;
        nvenc_frame->format = ctx->registered_frames[reg_idx].in_map.mappedBufferFmt;
        nvenc_frame->pitch = frame->linesize[0];

        return 0;
    } else {
        NV_ENC_LOCK_INPUT_BUFFER lockBufferParams = { 0 };

        lockBufferParams.version = NV_ENC_LOCK_INPUT_BUFFER_VER;
        lockBufferParams.inputBuffer = nvenc_frame->input_surface;

        nv_status = p_nvenc->nvEncLockInputBuffer(ctx->nvencoder, &lockBufferParams);
        if (nv_status != NV_ENC_SUCCESS) {
            return nvenc_print_error(avctx, nv_status, "Failed locking nvenc input buffer");
        }

        nvenc_frame->pitch = lockBufferParams.pitch;
        res = nvenc_copy_frame(avctx, nvenc_frame, &lockBufferParams, frame);

        nv_status = p_nvenc->nvEncUnlockInputBuffer(ctx->nvencoder, nvenc_frame->input_surface);
        if (nv_status != NV_ENC_SUCCESS) {
            return nvenc_print_error(avctx, nv_status, "Failed unlocking input buffer!");
        }

        return res;
    }
}

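/* Fill the codec-specific (H.264/HEVC) per-picture parameters: slice mode
 * settings and, optionally, a single SEI payload. */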
static void nvenc_codec_specific_pic_params(AVCodecContext *avctx,
                                            NV_ENC_PIC_PARAMS *params,
                                            NV_ENC_SEI_PAYLOAD *sei_data)
{
    NvencContext *ctx = avctx->priv_data;

    switch (avctx->codec->id) {
    case AV_CODEC_ID_H264:
        params->codecPicParams.h264PicParams.sliceMode =
            ctx->encode_config.encodeCodecConfig.h264Config.sliceMode;
        params->codecPicParams.h264PicParams.sliceModeData =
            ctx->encode_config.encodeCodecConfig.h264Config.sliceModeData;
        if (sei_data) {
            params->codecPicParams.h264PicParams.seiPayloadArray = sei_data;
            params->codecPicParams.h264PicParams.seiPayloadArrayCnt = 1;
        }

        break;
    case AV_CODEC_ID_HEVC:
        params->codecPicParams.hevcPicParams.sliceMode =
            ctx->encode_config.encodeCodecConfig.hevcConfig.sliceMode;
        params->codecPicParams.hevcPicParams.sliceModeData =
            ctx->encode_config.encodeCodecConfig.hevcConfig.sliceModeData;
        if (sei_data) {
            params->codecPicParams.hevcPicParams.seiPayloadArray = sei_data;
            params->codecPicParams.hevcPicParams.seiPayloadArrayCnt = 1;
        }

        break;
    }
}

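/* Input timestamps are queued when a frame is submitted and dequeued in
 * output order, so that DTS values can be derived for output packets. */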
static inline void timestamp_queue_enqueue(AVFifoBuffer* queue, int64_t timestamp)
{
    av_fifo_generic_write(queue, &timestamp, sizeof(timestamp), NULL);
}

static inline int64_t timestamp_queue_dequeue(AVFifoBuffer* queue)
{
    int64_t timestamp = AV_NOPTS_VALUE;
    if (av_fifo_size(queue) > 0)
        av_fifo_generic_read(queue, &timestamp, sizeof(timestamp), NULL);

    return timestamp;
}

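/* Set pts/dts on an output packet: pts comes from the encoder's output
 * timestamp, dts from the oldest queued input timestamp shifted back by
 * the B-frame delay in ticks. */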
static int nvenc_set_timestamp(AVCodecContext *avctx,
                               NV_ENC_LOCK_BITSTREAM *params,
                               AVPacket *pkt)
{
    NvencContext *ctx = avctx->priv_data;

    pkt->pts = params->outputTimeStamp;
    pkt->dts = timestamp_queue_dequeue(ctx->timestamp_list);

    pkt->dts -= FFMAX(avctx->max_b_frames, 0) * FFMAX(avctx->ticks_per_frame, 1);

    return 0;
}

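/* Lock the bitstream of a completed output surface, copy it into pkt,
 * unmap and release the associated input resource for hardware frames, and
 * attach picture type, encoder stats and timestamps. */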
static int process_output_surface(AVCodecContext *avctx, AVPacket *pkt, NvencSurface *tmpoutsurf)
{
    NvencContext *ctx = avctx->priv_data;
    NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
    NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &dl_fn->nvenc_funcs;

    uint32_t slice_mode_data;
    uint32_t *slice_offsets = NULL;
    NV_ENC_LOCK_BITSTREAM lock_params = { 0 };
    NVENCSTATUS nv_status;
    int res = 0;

    enum AVPictureType pict_type;

    switch (avctx->codec->id) {
    case AV_CODEC_ID_H264:
        slice_mode_data = ctx->encode_config.encodeCodecConfig.h264Config.sliceModeData;
        break;
    case AV_CODEC_ID_H265:
        slice_mode_data = ctx->encode_config.encodeCodecConfig.hevcConfig.sliceModeData;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown codec name\n");
        res = AVERROR(EINVAL);
        goto error;
    }
    slice_offsets = av_mallocz(slice_mode_data * sizeof(*slice_offsets));

    if (!slice_offsets) {
        res = AVERROR(ENOMEM);
        goto error;
    }

    lock_params.version = NV_ENC_LOCK_BITSTREAM_VER;

    lock_params.doNotWait = 0;
    lock_params.outputBitstream = tmpoutsurf->output_surface;
    lock_params.sliceOffsets = slice_offsets;

    nv_status = p_nvenc->nvEncLockBitstream(ctx->nvencoder, &lock_params);
    if (nv_status != NV_ENC_SUCCESS) {
        res = nvenc_print_error(avctx, nv_status, "Failed locking bitstream buffer");
        goto error;
    }

    res = pkt->data ?
        ff_alloc_packet2(avctx, pkt, lock_params.bitstreamSizeInBytes, lock_params.bitstreamSizeInBytes) :
        av_new_packet(pkt, lock_params.bitstreamSizeInBytes);

    if (res < 0) {
        p_nvenc->nvEncUnlockBitstream(ctx->nvencoder, tmpoutsurf->output_surface);
        goto error;
    }

    memcpy(pkt->data, lock_params.bitstreamBufferPtr, lock_params.bitstreamSizeInBytes);

    nv_status = p_nvenc->nvEncUnlockBitstream(ctx->nvencoder, tmpoutsurf->output_surface);
    if (nv_status != NV_ENC_SUCCESS) {
        res = nvenc_print_error(avctx, nv_status, "Failed unlocking bitstream buffer, expect the gates of mordor to open");
        goto error;
    }

    if (avctx->pix_fmt == AV_PIX_FMT_CUDA || avctx->pix_fmt == AV_PIX_FMT_D3D11) {
        ctx->registered_frames[tmpoutsurf->reg_idx].mapped -= 1;
        if (ctx->registered_frames[tmpoutsurf->reg_idx].mapped == 0) {
            nv_status = p_nvenc->nvEncUnmapInputResource(ctx->nvencoder, ctx->registered_frames[tmpoutsurf->reg_idx].in_map.mappedResource);
            if (nv_status != NV_ENC_SUCCESS) {
                res = nvenc_print_error(avctx, nv_status, "Failed unmapping input resource");
                goto error;
            }
        } else if (ctx->registered_frames[tmpoutsurf->reg_idx].mapped < 0) {
            res = AVERROR_BUG;
            goto error;
        }

        av_frame_unref(tmpoutsurf->in_ref);

        tmpoutsurf->input_surface = NULL;
    }

    switch (lock_params.pictureType) {
    case NV_ENC_PIC_TYPE_IDR:
        pkt->flags |= AV_PKT_FLAG_KEY;
    case NV_ENC_PIC_TYPE_I:
        pict_type = AV_PICTURE_TYPE_I;
        break;
    case NV_ENC_PIC_TYPE_P:
        pict_type = AV_PICTURE_TYPE_P;
        break;
    case NV_ENC_PIC_TYPE_B:
        pict_type = AV_PICTURE_TYPE_B;
        break;
    case NV_ENC_PIC_TYPE_BI:
        pict_type = AV_PICTURE_TYPE_BI;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown picture type encountered, expect the output to be broken.\n");
        av_log(avctx, AV_LOG_ERROR, "Please report this error and include as much information on how to reproduce it as possible.\n");
        res = AVERROR_EXTERNAL;
        goto error;
    }

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->coded_frame->pict_type = pict_type;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    ff_side_data_set_encoder_stats(pkt,
        (lock_params.frameAvgQP - 1) * FF_QP2LAMBDA, NULL, 0, pict_type);

    res = nvenc_set_timestamp(avctx, &lock_params, pkt);
    if (res < 0)
        goto error2;

    av_free(slice_offsets);

    return 0;

error:
    timestamp_queue_dequeue(ctx->timestamp_list);

error2:
    av_free(slice_offsets);

    return res;
}

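/* An output is available once at least one surface has completed and,
 * unless we are flushing, enough frames are in flight to satisfy the
 * configured async depth. */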
static int output_ready(AVCodecContext *avctx, int flush)
{
    NvencContext *ctx = avctx->priv_data;
    int nb_ready, nb_pending;

    nb_ready = av_fifo_size(ctx->output_surface_ready_queue) / sizeof(NvencSurface*);
    nb_pending = av_fifo_size(ctx->output_surface_queue) / sizeof(NvencSurface*);
    if (flush)
        return nb_ready > 0;
    return (nb_ready > 0) && (nb_ready + nb_pending >= ctx->async_depth);
}

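/* Compare the current codec parameters against the running encoder state
 * and, on a DAR or bitrate/VBV change, issue an in-flight encoder
 * reconfiguration. */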
static void reconfig_encoder(AVCodecContext *avctx, const AVFrame *frame)
{
    NvencContext *ctx = avctx->priv_data;
    NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &ctx->nvenc_dload_funcs.nvenc_funcs;
    NVENCSTATUS ret;

    NV_ENC_RECONFIGURE_PARAMS params = { 0 };
    int needs_reconfig = 0;
    int needs_encode_config = 0;
    int reconfig_bitrate = 0, reconfig_dar = 0;
    int dw, dh;

    params.version = NV_ENC_RECONFIGURE_PARAMS_VER;
    params.reInitEncodeParams = ctx->init_encode_params;

    compute_dar(avctx, &dw, &dh);
    if (dw != ctx->init_encode_params.darWidth || dh != ctx->init_encode_params.darHeight) {
        av_log(avctx, AV_LOG_VERBOSE,
               "aspect ratio change (DAR): %d:%d -> %d:%d\n",
               ctx->init_encode_params.darWidth,
               ctx->init_encode_params.darHeight, dw, dh);

        params.reInitEncodeParams.darHeight = dh;
        params.reInitEncodeParams.darWidth = dw;

        needs_reconfig = 1;
        reconfig_dar = 1;
    }

    if (ctx->rc != NV_ENC_PARAMS_RC_CONSTQP && ctx->support_dyn_bitrate) {
        if (avctx->bit_rate > 0 && params.reInitEncodeParams.encodeConfig->rcParams.averageBitRate != avctx->bit_rate) {
            av_log(avctx, AV_LOG_VERBOSE,
                   "avg bitrate change: %d -> %d\n",
                   params.reInitEncodeParams.encodeConfig->rcParams.averageBitRate,
                   (uint32_t)avctx->bit_rate);

            params.reInitEncodeParams.encodeConfig->rcParams.averageBitRate = avctx->bit_rate;
            reconfig_bitrate = 1;
        }

        if (avctx->rc_max_rate > 0 && ctx->encode_config.rcParams.maxBitRate != avctx->rc_max_rate) {
            av_log(avctx, AV_LOG_VERBOSE,
                   "max bitrate change: %d -> %d\n",
                   params.reInitEncodeParams.encodeConfig->rcParams.maxBitRate,
                   (uint32_t)avctx->rc_max_rate);

            params.reInitEncodeParams.encodeConfig->rcParams.maxBitRate = avctx->rc_max_rate;
            reconfig_bitrate = 1;
        }

        if (avctx->rc_buffer_size > 0 && ctx->encode_config.rcParams.vbvBufferSize != avctx->rc_buffer_size) {
            av_log(avctx, AV_LOG_VERBOSE,
                   "vbv buffer size change: %d -> %d\n",
                   params.reInitEncodeParams.encodeConfig->rcParams.vbvBufferSize,
                   avctx->rc_buffer_size);

            params.reInitEncodeParams.encodeConfig->rcParams.vbvBufferSize = avctx->rc_buffer_size;
            reconfig_bitrate = 1;
        }

        if (reconfig_bitrate) {
            params.resetEncoder = 1;
            params.forceIDR = 1;

            needs_encode_config = 1;
            needs_reconfig = 1;
        }
    }

    if (!needs_encode_config)
        params.reInitEncodeParams.encodeConfig = NULL;

    if (needs_reconfig) {
        ret = p_nvenc->nvEncReconfigureEncoder(ctx->nvencoder, &params);
        if (ret != NV_ENC_SUCCESS) {
            nvenc_print_error(avctx, ret, "failed to reconfigure nvenc");
        } else {
            if (reconfig_dar) {
                ctx->init_encode_params.darHeight = dh;
                ctx->init_encode_params.darWidth = dw;
            }

            if (reconfig_bitrate) {
                ctx->encode_config.rcParams.averageBitRate = params.reInitEncodeParams.encodeConfig->rcParams.averageBitRate;
                ctx->encode_config.rcParams.maxBitRate = params.reInitEncodeParams.encodeConfig->rcParams.maxBitRate;
                ctx->encode_config.rcParams.vbvBufferSize = params.reInitEncodeParams.encodeConfig->rcParams.vbvBufferSize;
            }
        }
    }
}

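/* Submit one frame (or EOS when frame is NULL) to the encoder: upload or
 * map the input, build the per-picture parameters (including an optional
 * A/53 closed caption SEI payload), and move any surfaces that have
 * finished encoding onto the ready queue. */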
int ff_nvenc_send_frame(AVCodecContext *avctx, const AVFrame *frame)
{
    NVENCSTATUS nv_status;
    NvencSurface *tmp_out_surf, *in_surf;
    int res, res2;
    NV_ENC_SEI_PAYLOAD *sei_data = NULL;
    size_t sei_size;

    NvencContext *ctx = avctx->priv_data;
    NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
    NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &dl_fn->nvenc_funcs;

    NV_ENC_PIC_PARAMS pic_params = { 0 };
    pic_params.version = NV_ENC_PIC_PARAMS_VER;

    if ((!ctx->cu_context && !ctx->d3d11_device) || !ctx->nvencoder)
        return AVERROR(EINVAL);

    if (ctx->encoder_flushing) {
        if (avctx->internal->draining)
            return AVERROR_EOF;

        ctx->encoder_flushing = 0;
        av_fifo_reset(ctx->timestamp_list);
    }

    if (frame) {
        in_surf = get_free_frame(ctx);
        if (!in_surf)
            return AVERROR(EAGAIN);

        res = nvenc_push_context(avctx);
        if (res < 0)
            return res;

        reconfig_encoder(avctx, frame);

        res = nvenc_upload_frame(avctx, frame, in_surf);

        res2 = nvenc_pop_context(avctx);
        if (res2 < 0)
            return res2;

        if (res)
            return res;

        pic_params.inputBuffer = in_surf->input_surface;
        pic_params.bufferFmt = in_surf->format;
        pic_params.inputWidth = in_surf->width;
        pic_params.inputHeight = in_surf->height;
        pic_params.inputPitch = in_surf->pitch;
        pic_params.outputBitstream = in_surf->output_surface;

        if (avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
            if (frame->top_field_first)
                pic_params.pictureStruct = NV_ENC_PIC_STRUCT_FIELD_TOP_BOTTOM;
            else
                pic_params.pictureStruct = NV_ENC_PIC_STRUCT_FIELD_BOTTOM_TOP;
        } else {
            pic_params.pictureStruct = NV_ENC_PIC_STRUCT_FRAME;
        }

        if (ctx->forced_idr >= 0 && frame->pict_type == AV_PICTURE_TYPE_I) {
            pic_params.encodePicFlags =
                ctx->forced_idr ? NV_ENC_PIC_FLAG_FORCEIDR : NV_ENC_PIC_FLAG_FORCEINTRA;
        } else {
            pic_params.encodePicFlags = 0;
        }

        pic_params.inputTimeStamp = frame->pts;

        if (ctx->a53_cc && av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC)) {
            if (ff_alloc_a53_sei(frame, sizeof(NV_ENC_SEI_PAYLOAD), (void**)&sei_data, &sei_size) < 0) {
                av_log(ctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
            }

            if (sei_data) {
                sei_data->payloadSize = (uint32_t)sei_size;
                sei_data->payloadType = 4;
                sei_data->payload = (uint8_t*)(sei_data + 1);
            }
        }

        nvenc_codec_specific_pic_params(avctx, &pic_params, sei_data);
    } else {
        pic_params.encodePicFlags = NV_ENC_PIC_FLAG_EOS;
        ctx->encoder_flushing = 1;
    }

    res = nvenc_push_context(avctx);
    if (res < 0)
        return res;

    nv_status = p_nvenc->nvEncEncodePicture(ctx->nvencoder, &pic_params);
    av_free(sei_data);

    res = nvenc_pop_context(avctx);
    if (res < 0)
        return res;

    if (nv_status != NV_ENC_SUCCESS &&
        nv_status != NV_ENC_ERR_NEED_MORE_INPUT)
        return nvenc_print_error(avctx, nv_status, "EncodePicture failed!");

    if (frame) {
        av_fifo_generic_write(ctx->output_surface_queue, &in_surf, sizeof(in_surf), NULL);
        timestamp_queue_enqueue(ctx->timestamp_list, frame->pts);
    }

    /* all the pending buffers are now ready for output */
    if (nv_status == NV_ENC_SUCCESS) {
        while (av_fifo_size(ctx->output_surface_queue) > 0) {
            av_fifo_generic_read(ctx->output_surface_queue, &tmp_out_surf, sizeof(tmp_out_surf), NULL);
            av_fifo_generic_write(ctx->output_surface_ready_queue, &tmp_out_surf, sizeof(tmp_out_surf), NULL);
        }
    }

    return 0;
}

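/* Return the next finished packet, AVERROR(EAGAIN) if more input is needed,
 * or AVERROR_EOF once flushing has drained all pending surfaces. */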
int ff_nvenc_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    NvencSurface *tmp_out_surf;
    int res, res2;

    NvencContext *ctx = avctx->priv_data;

    if ((!ctx->cu_context && !ctx->d3d11_device) || !ctx->nvencoder)
        return AVERROR(EINVAL);

    if (output_ready(avctx, ctx->encoder_flushing)) {
        av_fifo_generic_read(ctx->output_surface_ready_queue, &tmp_out_surf, sizeof(tmp_out_surf), NULL);

        res = nvenc_push_context(avctx);
        if (res < 0)
            return res;

        res = process_output_surface(avctx, pkt, tmp_out_surf);

        res2 = nvenc_pop_context(avctx);
        if (res2 < 0)
            return res2;

        if (res)
            return res;

        av_fifo_generic_write(ctx->unused_surface_queue, &tmp_out_surf, sizeof(tmp_out_surf), NULL);
    } else if (ctx->encoder_flushing) {
        return AVERROR_EOF;
    } else {
        return AVERROR(EAGAIN);
    }

    return 0;
}

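/* encode2-style entry point implemented on top of the send/receive API
 * above. */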
int ff_nvenc_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                          const AVFrame *frame, int *got_packet)
{
    NvencContext *ctx = avctx->priv_data;
    int res;

    if (!ctx->encoder_flushing) {
        res = ff_nvenc_send_frame(avctx, frame);
        if (res < 0)
            return res;
    }

    res = ff_nvenc_receive_packet(avctx, pkt);
    if (res == AVERROR(EAGAIN) || res == AVERROR_EOF) {
        *got_packet = 0;
    } else if (res < 0) {
        return res;
    } else {
        *got_packet = 1;
    }

    return 0;
}

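/* Signal end of stream so that all buffered frames are flushed out. */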
av_cold void ff_nvenc_encode_flush(AVCodecContext *avctx)
{
    ff_nvenc_send_frame(avctx, NULL);
}