/*
 * VP9 HW decode acceleration through NVDEC
 *
 * Copyright (c) 2016 Timo Rothenpieler
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/pixdesc.h"

#include "avcodec.h"
#include "nvdec.h"
#include "decode.h"
#include "internal.h"
#include "vp9shared.h"

static int nvdec_vp9_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
{
    VP9SharedContext *h = avctx->priv_data;
    const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);

    NVDECContext *ctx = avctx->internal->hwaccel_priv_data;
    CUVIDPICPARAMS *pp = &ctx->pic_params;
    CUVIDVP9PICPARAMS *ppc = &pp->CodecSpecific.vp9;
    FrameDecodeData *fdd;
    NVDECFrame *cf;
    AVFrame *cur_frame = h->frames[CUR_FRAME].tf.f;

    int ret, i;

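    /* ff_nvdec_start_frame() attaches the NVDEC hwaccel private data
     * (the decoder surface for this picture) to the current frame's
     * FrameDecodeData. */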
    ret = ff_nvdec_start_frame(avctx, cur_frame);
    if (ret < 0)
        return ret;

    fdd = (FrameDecodeData*)cur_frame->private_ref->data;
    cf  = (NVDECFrame*)fdd->hwaccel_priv;

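    /* Fill the NVDEC picture parameters from the parsed VP9 frame header;
     * PicWidthInMbs/FrameHeightInMbs are in 16x16 macroblock units. */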
    *pp = (CUVIDPICPARAMS) {
        .PicWidthInMbs     = (cur_frame->width  + 15) / 16,
        .FrameHeightInMbs  = (cur_frame->height + 15) / 16,
        .CurrPicIdx        = cf->idx,

        .CodecSpecific.vp9 = {
            .width                    = cur_frame->width,
            .height                   = cur_frame->height,

            .LastRefIdx               = ff_nvdec_get_ref_idx(h->refs[h->h.refidx[0]].f),
            .GoldenRefIdx             = ff_nvdec_get_ref_idx(h->refs[h->h.refidx[1]].f),
            .AltRefIdx                = ff_nvdec_get_ref_idx(h->refs[h->h.refidx[2]].f),

            .profile                  = h->h.profile,
            .frameContextIdx          = h->h.framectxid,
            .frameType                = !h->h.keyframe,
            .showFrame                = !h->h.invisible,
            .errorResilient           = h->h.errorres,
            .frameParallelDecoding    = h->h.parallelmode,
            .subSamplingX             = pixdesc->log2_chroma_w,
            .subSamplingY             = pixdesc->log2_chroma_h,
            .intraOnly                = h->h.intraonly,
            .allow_high_precision_mv  = h->h.keyframe ? 0 : h->h.highprecisionmvs,
            .refreshEntropyProbs      = h->h.refreshctx,

            .bitDepthMinus8Luma       = pixdesc->comp[0].depth - 8,
            .bitDepthMinus8Chroma     = pixdesc->comp[1].depth - 8,

            .loopFilterLevel          = h->h.filter.level,
            .loopFilterSharpness      = h->h.filter.sharpness,
            .modeRefLfEnabled         = h->h.lf_delta.enabled,

            .log2_tile_columns        = h->h.tiling.log2_tile_cols,
            .log2_tile_rows           = h->h.tiling.log2_tile_rows,

            .segmentEnabled           = h->h.segmentation.enabled,
            .segmentMapUpdate         = h->h.segmentation.update_map,
            .segmentMapTemporalUpdate = h->h.segmentation.temporal,
            .segmentFeatureMode       = h->h.segmentation.absolute_vals,

            .qpYAc                    = h->h.yac_qi,
            .qpYDc                    = h->h.ydc_qdelta,
            .qpChDc                   = h->h.uvdc_qdelta,
            .qpChAc                   = h->h.uvac_qdelta,

            .resetFrameContext        = h->h.resetctx,
            .mcomp_filter_type        = h->h.filtermode ^ (h->h.filtermode <= 1),

            .frameTagSize             = h->h.uncompressed_header_size,
            .offsetToDctParts         = h->h.compressed_header_size,

            .refFrameSignBias[0]      = 0,
        }
    };

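    /* Loop-filter deltas, segmentation probabilities and reference selection
     * are stored as fixed-size arrays in CUVIDVP9PICPARAMS, so copy them
     * element by element. */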
    for (i = 0; i < 2; i++)
        ppc->mbModeLfDelta[i] = h->h.lf_delta.mode[i];

    for (i = 0; i < 4; i++)
        ppc->mbRefLfDelta[i] = h->h.lf_delta.ref[i];

    for (i = 0; i < 7; i++)
        ppc->mb_segment_tree_probs[i] = h->h.segmentation.prob[i];

    for (i = 0; i < 3; i++) {
        ppc->activeRefIdx[i] = h->h.refidx[i];
        ppc->segment_pred_probs[i] = h->h.segmentation.pred_prob[i];
        ppc->refFrameSignBias[i + 1] = h->h.signbias[i];
    }

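    /* Per-segment feature enables and values: quantizer, loop filter,
     * reference frame and skip (the skip feature carries no value). */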
    for (i = 0; i < 8; i++) {
        ppc->segmentFeatureEnable[i][0] = h->h.segmentation.feat[i].q_enabled;
        ppc->segmentFeatureEnable[i][1] = h->h.segmentation.feat[i].lf_enabled;
        ppc->segmentFeatureEnable[i][2] = h->h.segmentation.feat[i].ref_enabled;
        ppc->segmentFeatureEnable[i][3] = h->h.segmentation.feat[i].skip_enabled;

        ppc->segmentFeatureData[i][0] = h->h.segmentation.feat[i].q_val;
        ppc->segmentFeatureData[i][1] = h->h.segmentation.feat[i].lf_val;
        ppc->segmentFeatureData[i][2] = h->h.segmentation.feat[i].ref_val;
        ppc->segmentFeatureData[i][3] = 0;
    }

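    /* Map AVColorSpace to the VP9 color_space syntax element
     * (0 = unknown, 1 = BT.601, 2 = BT.709, 3 = SMPTE-170, 4 = SMPTE-240,
     *  5 = BT.2020, 6 = reserved, 7 = sRGB). */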
    switch (avctx->colorspace) {
    default:
    case AVCOL_SPC_UNSPECIFIED:
        ppc->colorSpace = 0;
        break;
    case AVCOL_SPC_BT470BG:
        ppc->colorSpace = 1;
        break;
    case AVCOL_SPC_BT709:
        ppc->colorSpace = 2;
        break;
    case AVCOL_SPC_SMPTE170M:
        ppc->colorSpace = 3;
        break;
    case AVCOL_SPC_SMPTE240M:
        ppc->colorSpace = 4;
        break;
    case AVCOL_SPC_BT2020_NCL:
        ppc->colorSpace = 5;
        break;
    case AVCOL_SPC_RESERVED:
        ppc->colorSpace = 6;
        break;
    case AVCOL_SPC_RGB:
        ppc->colorSpace = 7;
        break;
    }

    return 0;
}

static int nvdec_vp9_frame_params(AVCodecContext *avctx,
                                  AVBufferRef *hw_frames_ctx)
{
    // VP9 uses a fixed size pool of 8 possible reference frames
    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 8, 0);
}

const AVHWAccel ff_vp9_nvdec_hwaccel = {
    .name                 = "vp9_nvdec",
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_VP9,
    .pix_fmt              = AV_PIX_FMT_CUDA,
    .start_frame          = nvdec_vp9_start_frame,
    .end_frame            = ff_nvdec_simple_end_frame,
    .decode_slice         = ff_nvdec_simple_decode_slice,
    .frame_params         = nvdec_vp9_frame_params,
    .init                 = ff_nvdec_decode_init,
    .uninit               = ff_nvdec_decode_uninit,
    .priv_data_size       = sizeof(NVDECContext),
};