/*
 *
 * Copyright 2020 Rockchip Electronics Co., LTD.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define MODULE_TAG "vpu_api_mlvec"

#include "vpu_api_mlvec.h"
#include <fcntl.h>
#include <cstring>
#include "hdf_log.h"
#include "mpp_log.h"
#include "mpp_mem.h"
#include "mpp_common.h"
#include "vpu_api_legacy.h"
#include "vpu_mem_legacy.h"
#include "securec.h"

#define VPU_API_DBG_MLVEC_FUNC (0x00010000)
#define VPU_API_DBG_MLVEC_FLOW (0x00020000)

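/*
 * Private implementation behind the opaque VpuApiMlvec handle: the bound mpp
 * context / API table / encoder config plus the last static and dynamic MLVEC
 * configurations received from the caller.
 */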
typedef struct VpuApiMlvecImpl_t {
    MppCtx mpp;
    MppApi *mpi;
    MppEncCfg enc_cfg;

    VpuApiMlvecStaticCfg st_cfg;
    VpuApiMlvecDynamicCfg dy_cfg;
} VpuApiMlvecImpl;

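/*
 * Create an MLVEC wrapper context. The impl struct comes from the MPP OSAL
 * calloc hook (presumably zero-filled, matching calloc semantics), so every
 * field starts at 0 except frame_qp, which is forced to -1 below to disable
 * per-frame QP override by default.
 */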
MPP_RET vpu_api_mlvec_init(VpuApiMlvec *ctx)
{
    if (ctx == nullptr) {
        HDF_LOGE("%s invalid nullptr input", __func__);
        return MPP_ERR_NULL_PTR;
    }

    HDF_LOGD("enter %p", ctx);

    VpuApiMlvecImpl *impl = (VpuApiMlvecImpl *)(*(mRKMppApi.Hdimpp_osal_calloc))(__FUNCTION__, sizeof(VpuApiMlvecImpl));
    if (impl == nullptr) {
        HDF_LOGE("%s failed to create MLVEC context", __func__);
        *ctx = nullptr;
        return MPP_NOK;
    }

    /* default disable frame_qp setup */
    impl->dy_cfg.frame_qp = -1;

    *ctx = impl;

    HDF_LOGD("leave %p %p", ctx, impl);
    return MPP_OK;
}

MPP_RET vpu_api_mlvec_deinit(VpuApiMlvec ctx)
{
    HDF_LOGD("enter %p", ctx);
    if (ctx) {
        (*(mRKMppApi.Hdimpp_osal_free))(__FUNCTION__, ctx);
    }
    ctx = nullptr; /* clears only the local copy; the caller's handle is now dangling */
    HDF_LOGD("leave %p", ctx);
    return MPP_OK;
}

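/*
 * Bind an already-created mpp context, its API table and the encoder config
 * to this MLVEC wrapper. Only the handles are stored; no ownership is taken,
 * so the caller must keep them alive for the lifetime of the wrapper.
 */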
MPP_RET vpu_api_mlvec_setup(VpuApiMlvec ctx, MppCtx mpp, MppApi *mpi, MppEncCfg enc_cfg)
{
    if (ctx == nullptr || mpp == nullptr || mpi == nullptr || enc_cfg == nullptr) {
        HDF_LOGE("%s invalid nullptr input ctx %p mpp %p mpi %p cfg %p",
                 __func__, ctx, mpp, mpi, enc_cfg);
        return MPP_ERR_NULL_PTR;
    }

    HDF_LOGD("enter %p", ctx);

    VpuApiMlvecImpl *impl = (VpuApiMlvecImpl *)ctx;
    impl->mpp = mpp;
    impl->mpi = mpi;
    impl->enc_cfg = enc_cfg;

    HDF_LOGD("leave %p", ctx);

    return MPP_OK;
}

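/*
 * Validate the magic word carried in a static config blob. The layout checked
 * below is: bits 31..24 hold MLVEC_MAGIC and bits 23..16 hold MLVEC_VERSION;
 * the low 16 bits are not examined here.
 */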
MPP_RET vpu_api_mlvec_check_cfg(void *p)
{
    if (p == nullptr) {
        HDF_LOGE("%s invalid nullptr input", __func__);
        return MPP_ERR_NULL_PTR;
    }

    VpuApiMlvecStaticCfg *cfg = (VpuApiMlvecStaticCfg *)p;
    RK_U32 magic = cfg->magic;
    MPP_RET ret = MPP_OK;

    if ((((magic >> 24) & 0xff) != MLVEC_MAGIC) ||
        (((magic >> 16) & 0xff) != MLVEC_VERSION))
        ret = MPP_NOK;

    HDF_LOGD("check mlvec cfg magic %08x %s", magic,
             (ret == MPP_OK) ? "success" : "failed");

    return ret;
}

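/*
 * Apply the one-shot static configuration: header-on-IDR mode, H.264 prefix
 * NAL insertion, slice split by macroblock count and the maximum temporal
 * layer id. The config is copied into the context first so later dynamic
 * updates can refer back to it (e.g. ltr_frames).
 */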
MPP_RET vpu_api_mlvec_set_st_cfg(VpuApiMlvec ctx, VpuApiMlvecStaticCfg *cfg)
{
    if (ctx == nullptr || cfg == nullptr) {
        HDF_LOGE("%s invalid nullptr input ctx %p cfg %p", __func__, ctx, cfg);
        return MPP_ERR_NULL_PTR;
    }

    HDF_LOGD("enter ctx %p cfg %p", ctx, cfg);

    /* check mlvec magic word */
    if (vpu_api_mlvec_check_cfg(cfg))
        return MPP_NOK;

    MPP_RET ret = MPP_OK;
    /* update static configuration */
    VpuApiMlvecImpl *impl = (VpuApiMlvecImpl *)ctx;

    if (memcpy_s(&impl->st_cfg, sizeof(impl->st_cfg), cfg, sizeof(impl->st_cfg)) != EOK) {
        HDF_LOGE("%s memcpy_s failed", __func__);
    }
    /* work on the internal copy from here on */
    cfg = &impl->st_cfg;

    /* get mpp context and check */
    MppCtx mpp_ctx = impl->mpp;
    MppApi *mpi = impl->mpi;
    MppEncCfg enc_cfg = impl->enc_cfg;

    /* start control mpp */
    HDF_LOGD("hdr_on_idr %d", cfg->hdr_on_idr);
    MppEncHeaderMode mode = cfg->hdr_on_idr ?
                            MPP_ENC_HEADER_MODE_EACH_IDR :
                            MPP_ENC_HEADER_MODE_DEFAULT;

    ret = mpi->control(mpp_ctx, MPP_ENC_SET_HEADER_MODE, &mode);
    if (ret)
        HDF_LOGE("%s setup enc header mode %d failed ret %d", __func__, mode, ret);

    HDF_LOGD("add_prefix %d", cfg->add_prefix);
    (*(mRKMppApi.HdiMppEncCfgSetS32))(enc_cfg, "h264:prefix_mode", cfg->add_prefix);

    HDF_LOGD("slice_mbs %d", cfg->slice_mbs);
    if (cfg->slice_mbs) {
        (*(mRKMppApi.HdiMppEncCfgSetU32))(enc_cfg, "split:mode", MPP_ENC_SPLIT_BY_CTU);
        (*(mRKMppApi.HdiMppEncCfgSetU32))(enc_cfg, "split:arg", cfg->slice_mbs);
    } else {
        (*(mRKMppApi.HdiMppEncCfgSetU32))(enc_cfg, "split:mode", MPP_ENC_SPLIT_NONE);
    }

    /* NOTE: ltr_frames is already configured */
    vpu_api_mlvec_set_dy_max_tid(ctx, cfg->max_tid);

    HDF_LOGD("leave ctx %p ret %d", ctx, ret);

    return ret;
}

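/*
 * Translate per-frame dynamic settings into MppMeta entries attached to the
 * next encoded frame. mark_ltr / use_ltr are one-shot and cleared on every
 * call; frame_qp and base_layer_pid are sticky and keep their last value
 * until updated again.
 */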
MPP_RET vpu_api_mlvec_set_dy_cfg(VpuApiMlvec ctx, VpuApiMlvecDynamicCfg *cfg, MppMeta meta)
{
    if (ctx == nullptr || cfg == nullptr || meta == nullptr) {
        HDF_LOGE("%s invalid nullptr input ctx %p cfg %p meta %p",
                 __func__, ctx, cfg, meta);
        return MPP_ERR_NULL_PTR;
    }

    HDF_LOGD("enter ctx %p cfg %p meta %p", ctx, cfg, meta);

    MPP_RET ret = MPP_OK;
    VpuApiMlvecImpl *impl = (VpuApiMlvecImpl *)ctx;
    VpuApiMlvecDynamicCfg *dst = &impl->dy_cfg;

    /* clear the non-sticky flags first */
    dst->mark_ltr = -1;
    dst->use_ltr = -1;
    /* frame qp and base layer pid are sticky flags */

    /* update flags */
    if (cfg->updated) {
        if (cfg->updated & VPU_API_ENC_MARK_LTR_UPDATED)
            dst->mark_ltr = cfg->mark_ltr;

        if (cfg->updated & VPU_API_ENC_USE_LTR_UPDATED)
            dst->use_ltr = cfg->use_ltr;

        if (cfg->updated & VPU_API_ENC_FRAME_QP_UPDATED)
            dst->frame_qp = cfg->frame_qp;

        if (cfg->updated & VPU_API_ENC_BASE_PID_UPDATED)
            dst->base_layer_pid = cfg->base_layer_pid;

        /* dynamic max temporal layer count updates go through the mpp ref cfg */
        cfg->updated = 0;
    }

    HDF_LOGD("ltr mark %2d use %2d frm qp %2d blpid %d", dst->mark_ltr,
             dst->use_ltr, dst->frame_qp, dst->base_layer_pid);

    /* setup next frame configuration */
    if (dst->mark_ltr >= 0)
        (*(mRKMppApi.Hdimpp_meta_set_s32))(meta, KEY_ENC_MARK_LTR, dst->mark_ltr);

    if (dst->use_ltr >= 0)
        (*(mRKMppApi.Hdimpp_meta_set_s32))(meta, KEY_ENC_USE_LTR, dst->use_ltr);

    if (dst->frame_qp >= 0)
        (*(mRKMppApi.Hdimpp_meta_set_s32))(meta, KEY_ENC_FRAME_QP, dst->frame_qp);

    if (dst->base_layer_pid >= 0)
        (*(mRKMppApi.Hdimpp_meta_set_s32))(meta, KEY_ENC_BASE_LAYER_PID, dst->base_layer_pid);

    HDF_LOGD("leave ctx %p ret %d", ctx, ret);

    return ret;
}

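/*
 * Rebuild the encoder reference structure for a new maximum temporal layer
 * id. max_tid N selects a TSVC pattern with N + 1 temporal layers; the
 * long-term reference slots requested at st_cfg time are re-applied on top.
 */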
MPP_RET vpu_api_mlvec_set_dy_max_tid(VpuApiMlvec ctx, RK_S32 max_tid)
{
    if (ctx == nullptr) {
        HDF_LOGE("%s invalid nullptr input", __func__);
        return MPP_ERR_NULL_PTR;
    }

    HDF_LOGD("enter ctx %p max_tid %d", ctx, max_tid);

    MPP_RET ret = MPP_OK;
    VpuApiMlvecImpl *impl = (VpuApiMlvecImpl *)ctx;
    MppCtx mpp_ctx = impl->mpp;
    MppApi *mpi = impl->mpi;

    MppEncRefLtFrmCfg lt_ref[16];
    MppEncRefStFrmCfg st_ref[16];
    RK_S32 lt_cfg_cnt = 0;
    RK_S32 st_cfg_cnt = 0;
    RK_S32 tid0_loop = 0;
    RK_S32 ltr_frames = impl->st_cfg.ltr_frames;

    memset_s(lt_ref, sizeof(lt_ref), 0, sizeof(lt_ref));
    memset_s(st_ref, sizeof(st_ref), 0, sizeof(st_ref));

    HDF_LOGD("ltr_frames %d, max_tid %d", ltr_frames, max_tid);

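    /*
     * Each case below fills one period of a repeating short-term reference
     * pattern: st_cfg_cnt is the number of entries in the period and
     * tid0_loop is the distance in frames between two temporal-id-0 frames,
     * which is also used below to space the long-term reference slots.
     */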
    switch (max_tid) {
    case 0 : {
        st_ref[0].is_non_ref = 0;
        st_ref[0].temporal_id = 0;
        st_ref[0].ref_mode = REF_TO_PREV_REF_FRM;
        st_ref[0].ref_arg = 0;
        st_ref[0].repeat = 0;

        st_cfg_cnt = 1;
        tid0_loop = 1;
    } break;
    case 1 : {
        /* set tsvc2 st-ref struct */
        /* st 0 layer 0 - ref */
        st_ref[0].is_non_ref = 0;
        st_ref[0].temporal_id = 0;
        st_ref[0].ref_mode = REF_TO_PREV_REF_FRM;
        st_ref[0].ref_arg = 0;
        st_ref[0].repeat = 0;
        /* st 1 layer 1 - non-ref */
        st_ref[1].is_non_ref = 1;
        st_ref[1].temporal_id = 1;
        st_ref[1].ref_mode = REF_TO_PREV_REF_FRM;
        st_ref[1].ref_arg = 0;
        st_ref[1].repeat = 0;
        /* st 2 layer 0 - ref */
        st_ref[2].is_non_ref = 0;
        st_ref[2].temporal_id = 0;
        st_ref[2].ref_mode = REF_TO_PREV_REF_FRM;
        st_ref[2].ref_arg = 0;
        st_ref[2].repeat = 0;

        st_cfg_cnt = 3;
        tid0_loop = 2;
    } break;
    case 2 : {
        /* set tsvc3 st-ref struct */
        /* st 0 layer 0 - ref */
        st_ref[0].is_non_ref = 0;
        st_ref[0].temporal_id = 0;
        st_ref[0].ref_mode = REF_TO_TEMPORAL_LAYER;
        st_ref[0].ref_arg = 0;
        st_ref[0].repeat = 0;
        /* st 1 layer 2 - kept as reference here (is_non_ref = 0) */
        st_ref[1].is_non_ref = 0;
        st_ref[1].temporal_id = 2;
        st_ref[1].ref_mode = REF_TO_TEMPORAL_LAYER;
        st_ref[1].ref_arg = 0;
        st_ref[1].repeat = 0;
        /* st 2 layer 1 - ref */
        st_ref[2].is_non_ref = 0;
        st_ref[2].temporal_id = 1;
        st_ref[2].ref_mode = REF_TO_TEMPORAL_LAYER;
        st_ref[2].ref_arg = 0;
        st_ref[2].repeat = 0;
        /* st 3 layer 2 - kept as reference, refs temporal layer 1 */
        st_ref[3].is_non_ref = 0;
        st_ref[3].temporal_id = 2;
        st_ref[3].ref_mode = REF_TO_TEMPORAL_LAYER;
        st_ref[3].ref_arg = 1;
        st_ref[3].repeat = 0;
        /* st 4 layer 0 - ref */
        st_ref[4].is_non_ref = 0;
        st_ref[4].temporal_id = 0;
        st_ref[4].ref_mode = REF_TO_TEMPORAL_LAYER;
        st_ref[4].ref_arg = 0;
        st_ref[4].repeat = 0;

        st_cfg_cnt = 5;
        tid0_loop = 4;
    } break;
    case 3 : {
        /* set tsvc4 st-ref struct */
        /* st 0 layer 0 - ref */
        st_ref[0].is_non_ref = 0;
        st_ref[0].temporal_id = 0;
        st_ref[0].ref_mode = REF_TO_TEMPORAL_LAYER;
        st_ref[0].ref_arg = 0;
        st_ref[0].repeat = 0;
        /* st 1 layer 3 - non-ref */
        st_ref[1].is_non_ref = 1;
        st_ref[1].temporal_id = 3;
        st_ref[1].ref_mode = REF_TO_PREV_REF_FRM;
        st_ref[1].ref_arg = 0;
        st_ref[1].repeat = 0;
        /* st 2 layer 2 - ref */
        st_ref[2].is_non_ref = 0;
        st_ref[2].temporal_id = 2;
        st_ref[2].ref_mode = REF_TO_PREV_REF_FRM;
        st_ref[2].ref_arg = 0;
        st_ref[2].repeat = 0;
        /* st 3 layer 3 - non-ref */
        st_ref[3].is_non_ref = 1;
        st_ref[3].temporal_id = 3;
        st_ref[3].ref_mode = REF_TO_PREV_REF_FRM;
        st_ref[3].ref_arg = 0;
        st_ref[3].repeat = 0;
        /* st 4 layer 1 - ref */
        st_ref[4].is_non_ref = 0;
        st_ref[4].temporal_id = 1;
        st_ref[4].ref_mode = REF_TO_TEMPORAL_LAYER;
        st_ref[4].ref_arg = 0;
        st_ref[4].repeat = 0;
        /* st 5 layer 3 - non-ref */
        st_ref[5].is_non_ref = 1;
        st_ref[5].temporal_id = 3;
        st_ref[5].ref_mode = REF_TO_PREV_REF_FRM;
        st_ref[5].ref_arg = 0;
        st_ref[5].repeat = 0;
        /* st 6 layer 2 - ref */
        st_ref[6].is_non_ref = 0;
        st_ref[6].temporal_id = 2;
        st_ref[6].ref_mode = REF_TO_PREV_REF_FRM;
        st_ref[6].ref_arg = 0;
        st_ref[6].repeat = 0;
        /* st 7 layer 3 - non-ref */
        st_ref[7].is_non_ref = 1;
        st_ref[7].temporal_id = 3;
        st_ref[7].ref_mode = REF_TO_PREV_REF_FRM;
        st_ref[7].ref_arg = 0;
        st_ref[7].repeat = 0;
        /* st 8 layer 0 - ref */
        st_ref[8].is_non_ref = 0;
        st_ref[8].temporal_id = 0;
        st_ref[8].ref_mode = REF_TO_PREV_REF_FRM;
        st_ref[8].ref_arg = 0;
        st_ref[8].repeat = 0;

        st_cfg_cnt = 9;
        tid0_loop = 8;
    } break;
    default : {
        HDF_LOGE("%s invalid max temporal layer id %d", __func__, max_tid);
    } break;
    }

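    /*
     * One long-term reference slot is configured per requested LTR frame,
     * with lt_delay staggering each slot by a full tid0 period
     * (tid0_loop * i) so the slots do not collide on the same
     * temporal-id-0 frame.
     */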
    if (ltr_frames) {
        RK_S32 i;

        lt_cfg_cnt = ltr_frames;
        for (i = 0; i < ltr_frames; i++) {
            lt_ref[i].lt_idx = i;
            lt_ref[i].temporal_id = 0;
            lt_ref[i].ref_mode = REF_TO_PREV_LT_REF;
            lt_ref[i].lt_gap = 0;
            lt_ref[i].lt_delay = tid0_loop * i;
        }
    }

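    /*
     * Push the assembled reference config to the encoder through
     * MPP_ENC_SET_REF_CFG; the nullptr path below asks the encoder to fall
     * back to its default reference structure.
     */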
    HDF_LOGD("lt_cfg_cnt %d st_cfg_cnt %d", lt_cfg_cnt, st_cfg_cnt);
    if (lt_cfg_cnt || st_cfg_cnt) {
        MppEncRefCfg ref = nullptr;

        (*(mRKMppApi.HdiMppEncRefCfgInit))(&ref);

        /* NOTE: each assignment overwrites ret; only the final control() result is reported */
        ret = (*(mRKMppApi.Hdimpp_enc_ref_cfg_set_cfg_cnt))(ref, lt_cfg_cnt, st_cfg_cnt);
        ret = (*(mRKMppApi.Hdimpp_enc_ref_cfg_add_lt_cfg))(ref, lt_cfg_cnt, lt_ref);
        ret = (*(mRKMppApi.Hdimpp_enc_ref_cfg_add_st_cfg))(ref, st_cfg_cnt, st_ref);
        ret = (*(mRKMppApi.Hdimpp_enc_ref_cfg_set_keep_cpb))(ref, 1);
        ret = (*(mRKMppApi.Hdimpp_enc_ref_cfg_check))(ref);

        ret = mpi->control(mpp_ctx, MPP_ENC_SET_REF_CFG, ref);
        if (ret)
            HDF_LOGE("%s mpi control enc set ref cfg failed ret %d", __func__, ret);

        (*(mRKMppApi.HdiMppEncRefCfgDeinit))(&ref);
    } else {
        ret = mpi->control(mpp_ctx, MPP_ENC_SET_REF_CFG, nullptr);
        if (ret)
            HDF_LOGE("%s mpi control enc set ref cfg failed ret %d", __func__, ret);
    }

    HDF_LOGD("leave ctx %p ret %d", ctx, ret);

    return ret;
}