1 /*
2 *
3 * Copyright 2020 Rockchip Electronics Co., LTD.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18 #define MODULE_TAG "vpu_api_mlvec"
19
20 #include "vpu_api_mlvec.h"
21 #include <fcntl.h>
22 #include "cstring"
23 #include "hdf_log.h"
24 #include "mpp_log.h"
25 #include "mpp_mem.h"
26 #include "mpp_common.h"
27 #include "vpu_api_legacy.h"
28 #include "vpu_mem_legacy.h"
29 #include "securec.h"
30
31 #define VPU_API_DBG_MLVEC_FUNC (0x00010000)
32 #define VPU_API_DBG_MLVEC_FLOW (0x00020000)
33
/* Internal state for one MLVEC context handle (opaque VpuApiMlvec to callers). */
typedef struct VpuApiMlvecImpl_t {
    MppCtx mpp;         /* mpp context handle, bound in vpu_api_mlvec_setup() */
    MppApi *mpi;        /* mpp API vtable, bound in vpu_api_mlvec_setup() */
    MppEncCfg enc_cfg;  /* encoder config handle written by vpu_api_mlvec_set_st_cfg() */

    VpuApiMlvecStaticCfg st_cfg;   /* last applied static configuration (copied in) */
    VpuApiMlvecDynamicCfg dy_cfg;  /* per-frame dynamic state; mark/use ltr are one-shot,
                                    * frame_qp / base_layer_pid are sticky */
} VpuApiMlvecImpl;
42
vpu_api_mlvec_init(VpuApiMlvec * ctx)43 MPP_RET vpu_api_mlvec_init(VpuApiMlvec *ctx)
44 {
45 if (ctx == nullptr) {
46 HDF_LOGE("invalid nullptr input\n");
47 return MPP_ERR_NULL_PTR;
48 }
49
50 HDF_LOGE("enter %p\n", ctx);
51
52 VpuApiMlvecImpl *impl = (VpuApiMlvecImpl*)(*(mRKMppApi.Hdimpp_osal_calloc))(__FUNCTION__, sizeof(VpuApiMlvecImpl));
53 if (impl == nullptr)
54 HDF_LOGE("failed to create MLVEC context\n");
55
56 /* default disable frame_qp setup */
57 impl->dy_cfg.frame_qp = -1;
58
59 *ctx = impl;
60
61 HDF_LOGE("leave %p %p\n", ctx, impl);
62 return (impl) ? (MPP_OK) : (MPP_NOK);
63 }
64
vpu_api_mlvec_deinit(VpuApiMlvec ctx)65 MPP_RET vpu_api_mlvec_deinit(VpuApiMlvec ctx)
66 {
67 HDF_LOGE("enter %p\n", ctx);
68 if (ctx) {
69 (*(mRKMppApi.Hdimpp_osal_free))(__FUNCTION__, ctx);
70 }
71 ctx = nullptr;
72 HDF_LOGE("leave %p\n", ctx);
73 return MPP_OK;
74 }
75
vpu_api_mlvec_setup(VpuApiMlvec ctx,MppCtx mpp,MppApi * mpi,MppEncCfg enc_cfg)76 MPP_RET vpu_api_mlvec_setup(VpuApiMlvec ctx, MppCtx mpp, MppApi *mpi, MppEncCfg enc_cfg)
77 {
78 if (ctx == nullptr || mpp == nullptr || mpi == nullptr || enc_cfg == nullptr) {
79 HDF_LOGE("invalid nullptr input ctx %p mpp %p mpi %p cfg %p\n",
80 ctx, mpp, mpi, enc_cfg);
81 return MPP_ERR_NULL_PTR;
82 }
83
84 HDF_LOGE("enter %p\n", ctx);
85
86 VpuApiMlvecImpl *impl = (VpuApiMlvecImpl *)ctx;
87 impl->mpp = mpp;
88 impl->mpi = mpi;
89 impl->enc_cfg = enc_cfg;
90
91 HDF_LOGE("leave %p\n", ctx);
92
93 return MPP_OK;
94 }
95
vpu_api_mlvec_check_cfg(void * p)96 MPP_RET vpu_api_mlvec_check_cfg(void *p)
97 {
98 if (p == nullptr) {
99 HDF_LOGE("invalid nullptr input\n");
100 return MPP_ERR_NULL_PTR;
101 }
102
103 VpuApiMlvecStaticCfg *cfg = (VpuApiMlvecStaticCfg *)p;
104 RK_U32 magic = cfg->magic;
105 MPP_RET ret = MPP_OK;
106
107 if ((((magic >> 24) & 0xff) != MLVEC_MAGIC) || // (magic >> 24)
108 (((magic >> 16) & 0xff) != MLVEC_VERSION)) // (magic >> 16)
109 ret = MPP_NOK;
110
111 HDF_LOGE("check mlvec cfg magic %08x %s\n", magic,
112 (ret == MPP_OK) ? "success" : "failed");
113
114 return ret;
115 }
116
vpu_api_mlvec_set_st_cfg(VpuApiMlvec ctx,VpuApiMlvecStaticCfg * cfg)117 MPP_RET vpu_api_mlvec_set_st_cfg(VpuApiMlvec ctx, VpuApiMlvecStaticCfg *cfg)
118 {
119 if (ctx == nullptr || cfg == nullptr) {
120 HDF_LOGE("invalid nullptr input ctx %p cfg %p\n");
121 return MPP_ERR_NULL_PTR;
122 }
123
124 HDF_LOGE("enter ctx %p cfg %p\n", ctx, cfg);
125
126 /* check mlvec magic word */
127 if (vpu_api_mlvec_check_cfg(cfg))
128 return MPP_NOK;
129
130 MPP_RET ret = MPP_OK;
131 /* update static configure */
132 VpuApiMlvecImpl *impl = (VpuApiMlvecImpl *)ctx;
133
134 if (memcpy_s(&impl->st_cfg, sizeof(impl->st_cfg), cfg, sizeof(impl->st_cfg)) != EOK) {
135 HDF_LOGE("memcpy_s no");
136 }
137 cfg = &impl->st_cfg;
138
139 /* get mpp context and check */
140 MppCtx mpp_ctx = impl->mpp;
141 MppApi *mpi = impl->mpi;
142 MppEncCfg enc_cfg = impl->enc_cfg;
143
144 /* start control mpp */
145 HDF_LOGE("hdr_on_idr %d\n", cfg->hdr_on_idr);
146 MppEncHeaderMode mode = cfg->hdr_on_idr ?
147 MPP_ENC_HEADER_MODE_EACH_IDR :
148 MPP_ENC_HEADER_MODE_DEFAULT;
149
150 ret = mpi->control(mpp_ctx, MPP_ENC_SET_HEADER_MODE, &mode);
151 if (ret)
152 HDF_LOGE("setup enc header mode %d failed ret %d\n", mode, ret);
153
154 HDF_LOGE("add_prefix %d\n", cfg->add_prefix);
155 (*(mRKMppApi.HdiMppEncCfgSetS32))(enc_cfg, "h264:prefix_mode", cfg->add_prefix);
156
157 HDF_LOGE("slice_mbs %d\n", cfg->slice_mbs);
158 if (cfg->slice_mbs) {
159 (*(mRKMppApi.HdiMppEncCfgSetU32))(enc_cfg, "split:mode", MPP_ENC_SPLIT_BY_CTU);
160 (*(mRKMppApi.HdiMppEncCfgSetU32))(enc_cfg, "split:arg", cfg->slice_mbs);
161 } else
162 (*(mRKMppApi.HdiMppEncCfgSetU32))(enc_cfg, "split:mode", MPP_ENC_SPLIT_NONE);
163
164 /* NOTE: ltr_frames is already configured */
165 vpu_api_mlvec_set_dy_max_tid(ctx, cfg->max_tid);
166
167 HDF_LOGE("leave ctx %p ret %d\n", ctx, ret);
168
169 return ret;
170 }
171
vpu_api_mlvec_set_dy_cfg(VpuApiMlvec ctx,VpuApiMlvecDynamicCfg * cfg,MppMeta meta)172 MPP_RET vpu_api_mlvec_set_dy_cfg(VpuApiMlvec ctx, VpuApiMlvecDynamicCfg *cfg, MppMeta meta)
173 {
174 if (ctx == nullptr || cfg == nullptr || meta == nullptr) {
175 HDF_LOGE("invalid nullptr input ctx %p cfg %p meta %p\n",
176 ctx, cfg, meta);
177 return MPP_ERR_NULL_PTR;
178 }
179
180 HDF_LOGE("enter ctx %p cfg %p meta %p\n", ctx, cfg, meta);
181
182 MPP_RET ret = MPP_OK;
183 VpuApiMlvecImpl *impl = (VpuApiMlvecImpl *)ctx;
184 VpuApiMlvecDynamicCfg *dst = &impl->dy_cfg;
185
186 /* clear non-sticky flag first */
187 dst->mark_ltr = -1;
188 dst->use_ltr = -1;
189 /* frame qp and base layer pid is sticky flag */
190
191 /* update flags */
192 if (cfg->updated) {
193 if (cfg->updated & VPU_API_ENC_MARK_LTR_UPDATED)
194 dst->mark_ltr = cfg->mark_ltr;
195
196 if (cfg->updated & VPU_API_ENC_USE_LTR_UPDATED)
197 dst->use_ltr = cfg->use_ltr;
198
199 if (cfg->updated & VPU_API_ENC_FRAME_QP_UPDATED)
200 dst->frame_qp = cfg->frame_qp;
201
202 if (cfg->updated & VPU_API_ENC_BASE_PID_UPDATED)
203 dst->base_layer_pid = cfg->base_layer_pid;
204
205 /* dynamic max temporal layer count updated go through mpp ref cfg */
206 cfg->updated = 0;
207 }
208
209 HDF_LOGE("ltr mark %2d use %2d frm qp %2d blpid %d\n", dst->mark_ltr,
210 dst->use_ltr, dst->frame_qp, dst->base_layer_pid);
211
212 /* setup next frame configure */
213 if (dst->mark_ltr >= 0)
214 (*(mRKMppApi.Hdimpp_meta_set_s32))(meta, KEY_ENC_MARK_LTR, dst->mark_ltr);
215
216 if (dst->use_ltr >= 0)
217 (*(mRKMppApi.Hdimpp_meta_set_s32))(meta, KEY_ENC_USE_LTR, dst->use_ltr);
218
219 if (dst->frame_qp >= 0)
220 (*(mRKMppApi.Hdimpp_meta_set_s32))(meta, KEY_ENC_FRAME_QP, dst->frame_qp);
221
222 if (dst->base_layer_pid >= 0)
223 (*(mRKMppApi.Hdimpp_meta_set_s32))(meta, KEY_ENC_BASE_LAYER_PID, dst->base_layer_pid);
224
225 HDF_LOGE("leave ctx %p ret %d\n", ctx, ret);
226
227 return ret;
228 }
229
vpu_api_mlvec_set_dy_max_tid(VpuApiMlvec ctx,RK_S32 max_tid)230 MPP_RET vpu_api_mlvec_set_dy_max_tid(VpuApiMlvec ctx, RK_S32 max_tid)
231 {
232 if (ctx == nullptr) {
233 HDF_LOGE("invalid nullptr input\n");
234 return MPP_ERR_NULL_PTR;
235 }
236
237 HDF_LOGE("enter ctx %p max_tid %d\n", ctx, max_tid);
238
239 MPP_RET ret = MPP_OK;
240 VpuApiMlvecImpl *impl = (VpuApiMlvecImpl *)ctx;
241 MppCtx mpp_ctx = impl->mpp;
242 MppApi *mpi = impl->mpi;
243
244 MppEncRefLtFrmCfg lt_ref[16];
245 MppEncRefStFrmCfg st_ref[16];
246 RK_S32 lt_cfg_cnt = 0;
247 RK_S32 st_cfg_cnt = 0;
248 RK_S32 tid0_loop = 0;
249 RK_S32 ltr_frames = impl->st_cfg.ltr_frames;
250
251 memset_s(lt_ref, sizeof(lt_ref), 0, sizeof(lt_ref));
252 memset_s(st_ref, sizeof(st_ref), 0, sizeof(st_ref));
253
254 HDF_LOGE("ltr_frames %d\n", ltr_frames);
255 HDF_LOGE("max_tid %d\n", max_tid);
256
257 switch (max_tid) {
258 case 0 : {
259 st_ref[0].is_non_ref = 0;
260 st_ref[0].temporal_id = 0;
261 st_ref[0].ref_mode = REF_TO_PREV_REF_FRM;
262 st_ref[0].ref_arg = 0;
263 st_ref[0].repeat = 0;
264
265 st_cfg_cnt = 1;
266 tid0_loop = 1;
267 HDF_LOGE("no tsvc\n");
268 } break;
269 case 1 : {
270 /* set tsvc2 st-ref struct */
271 /* st 0 layer 0 - ref */
272 st_ref[0].is_non_ref = 0;
273 st_ref[0].temporal_id = 0;
274 st_ref[0].ref_mode = REF_TO_PREV_REF_FRM;
275 st_ref[0].ref_arg = 0;
276 st_ref[0].repeat = 0;
277 /* st 1 layer 1 - non-ref */
278 st_ref[1].is_non_ref = 1;
279 st_ref[1].temporal_id = 1;
280 st_ref[1].ref_mode = REF_TO_PREV_REF_FRM;
281 st_ref[1].ref_arg = 0;
282 st_ref[1].repeat = 0;
283 /* st 2 layer 0 - ref */
284 st_ref[2].is_non_ref = 0; // st 2 layer 0 - ref
285 st_ref[2].temporal_id = 0; // st 2 layer 0 - ref
286 st_ref[2].ref_mode = REF_TO_PREV_REF_FRM; // st 2 layer 0 - ref
287 st_ref[2].ref_arg = 0; // st 2 layer 0 - ref
288 st_ref[2].repeat = 0; // st 2 layer 0 - ref
289
290 st_cfg_cnt = 3; // st_cfg_cnt = 3
291 tid0_loop = 2; // tid0_loop = 2
292 HDF_LOGE("tsvc2\n");
293 } break;
294 case 2 : { // st 2 layer
295 /* set tsvc3 st-ref struct */
296 /* st 0 layer 0 - ref */
297 st_ref[0].is_non_ref = 0;
298 st_ref[0].temporal_id = 0;
299 st_ref[0].ref_mode = REF_TO_TEMPORAL_LAYER;
300 st_ref[0].ref_arg = 0;
301 st_ref[0].repeat = 0;
302 /* st 1 layer 2 - non-ref */
303 st_ref[1].is_non_ref = 0;
304 st_ref[1].temporal_id = 2; // layer 2
305 st_ref[1].ref_mode = REF_TO_TEMPORAL_LAYER;
306 st_ref[1].ref_arg = 0;
307 st_ref[1].repeat = 0;
308 /* st 2 layer 1 - ref */
309 st_ref[2].is_non_ref = 0; // layer 2
310 st_ref[2].temporal_id = 1; // layer 2
311 st_ref[2].ref_mode = REF_TO_TEMPORAL_LAYER; // layer 2
312 st_ref[2].ref_arg = 0; // layer 2
313 st_ref[2].repeat = 0; // layer 2
314 /* st 3 layer 2 - non-ref */
315 st_ref[3].is_non_ref = 0; // layer 3
316 st_ref[3].temporal_id = 2; // layer 2 3
317 st_ref[3].ref_mode = REF_TO_TEMPORAL_LAYER; // layer 3
318 st_ref[3].ref_arg = 1; // layer 3
319 st_ref[3].repeat = 0; // layer 3
320 /* st 4 layer 0 - ref */
321 st_ref[4].is_non_ref = 0; // layer 4
322 st_ref[4].temporal_id = 0; // layer 4
323 st_ref[4].ref_mode = REF_TO_TEMPORAL_LAYER; // layer 4
324 st_ref[4].ref_arg = 0; // layer 4
325 st_ref[4].repeat = 0; // layer 4
326
327 st_cfg_cnt = 5; // st_cfg_cnt = 5
328 tid0_loop = 4; // tid0_loop = 4
329 HDF_LOGE("tsvc3\n");
330 } break;
331 case 3 : { // set tsvc3
332 /* set tsvc3 st-ref struct */
333 /* st 0 layer 0 - ref */
334 st_ref[0].is_non_ref = 0;
335 st_ref[0].temporal_id = 0;
336 st_ref[0].ref_mode = REF_TO_TEMPORAL_LAYER;
337 st_ref[0].ref_arg = 0;
338 st_ref[0].repeat = 0;
339 /* st 1 layer 3 - non-ref */
340 st_ref[1].is_non_ref = 1;
341 st_ref[1].temporal_id = 3; // layer 3
342 st_ref[1].ref_mode = REF_TO_PREV_REF_FRM;
343 st_ref[1].ref_arg = 0;
344 st_ref[1].repeat = 0;
345 /* st 2 layer 2 - ref */
346 st_ref[2].is_non_ref = 0; // st 2
347 st_ref[2].temporal_id = 2; // st 2
348 st_ref[2].ref_mode = REF_TO_PREV_REF_FRM; // st 2
349 st_ref[2].ref_arg = 0; // st 2
350 st_ref[2].repeat = 0; // st 2
351 /* st 3 layer 3 - non-ref */
352 st_ref[3].is_non_ref = 1; // st 3
353 st_ref[3].temporal_id = 3; // st 3
354 st_ref[3].ref_mode = REF_TO_PREV_REF_FRM; // st 3
355 st_ref[3].ref_arg = 0; // st 3
356 st_ref[3].repeat = 0; // st 3
357 /* st 4 layer 1 - ref */
358 st_ref[4].is_non_ref = 0; // st 4
359 st_ref[4].temporal_id = 1; // st 4
360 st_ref[4].ref_mode = REF_TO_TEMPORAL_LAYER; // st 4
361 st_ref[4].ref_arg = 0; // st 4
362 st_ref[4].repeat = 0; // st 4
363 /* st 5 layer 3 - non-ref */
364 st_ref[5].is_non_ref = 1; // st 5
365 st_ref[5].temporal_id = 3; // st 5 layer 3
366 st_ref[5].ref_mode = REF_TO_PREV_REF_FRM; // st 5
367 st_ref[5].ref_arg = 0; // st 5
368 st_ref[5].repeat = 0; // st 5
369 /* st 6 layer 2 - ref */
370 st_ref[6].is_non_ref = 0; // st 6
371 st_ref[6].temporal_id = 2; // st 6 layer 2
372 st_ref[6].ref_mode = REF_TO_PREV_REF_FRM; // st 6
373 st_ref[6].ref_arg = 0; // st 6
374 st_ref[6].repeat = 0; // st 6
375 /* st 7 layer 3 - non-ref */
376 st_ref[7].is_non_ref = 1; // st 7
377 st_ref[7].temporal_id = 3; // st 7 layer 3
378 st_ref[7].ref_mode = REF_TO_PREV_REF_FRM; // st 7
379 st_ref[7].ref_arg = 0; // st 7
380 st_ref[7].repeat = 0; // st 7
381 /* st 8 layer 0 - ref */
382 st_ref[8].is_non_ref = 0; // st 8
383 st_ref[8].temporal_id = 0; // st 8
384 st_ref[8].ref_mode = REF_TO_PREV_REF_FRM; // st 8 layer 0 - ref
385 st_ref[8].ref_arg = 0; // st 8
386 st_ref[8].repeat = 0; // st 8
387
388 st_cfg_cnt = 9; // st_cfg_cnt = 9
389 tid0_loop = 8; // tid0_loop = 8
390 HDF_LOGE("tsvc4\n");
391 } break;
392 default : {
393 HDF_LOGE("invalid max temporal layer id %d\n", max_tid);
394 } break;
395 }
396
397 if (ltr_frames) {
398 RK_S32 i;
399
400 lt_cfg_cnt = ltr_frames;
401 for (i = 0; i < ltr_frames; i++) {
402 lt_ref[i].lt_idx = i;
403 lt_ref[i].temporal_id = 0;
404 lt_ref[i].ref_mode = REF_TO_PREV_LT_REF;
405 lt_ref[i].lt_gap = 0;
406 lt_ref[i].lt_delay = tid0_loop * i;
407 }
408 }
409
410 HDF_LOGE("lt_cfg_cnt %d st_cfg_cnt %d\n", lt_cfg_cnt, st_cfg_cnt);
411 if (lt_cfg_cnt || st_cfg_cnt) {
412 MppEncRefCfg ref = nullptr;
413
414 (*(mRKMppApi.HdiMppEncRefCfgInit))(&ref);
415
416 ret = (*(mRKMppApi.Hdimpp_enc_ref_cfg_set_cfg_cnt))(ref, lt_cfg_cnt, st_cfg_cnt);
417 ret = (*(mRKMppApi.Hdimpp_enc_ref_cfg_add_lt_cfg))(ref, lt_cfg_cnt, lt_ref);
418 ret = (*(mRKMppApi.Hdimpp_enc_ref_cfg_add_st_cfg))(ref, st_cfg_cnt, st_ref);
419 ret = (*(mRKMppApi.Hdimpp_enc_ref_cfg_set_keep_cpb))(ref, 1);
420 ret = (*(mRKMppApi.Hdimpp_enc_ref_cfg_check))(ref);
421
422 ret = mpi->control(mpp_ctx, MPP_ENC_SET_REF_CFG, ref);
423 if (ret)
424 HDF_LOGE("mpi control enc set ref cfg failed ret %d\n", ret);
425
426 (*(mRKMppApi.HdiMppEncRefCfgDeinit))(&ref);
427 } else {
428 ret = mpi->control(mpp_ctx, MPP_ENC_SET_REF_CFG, nullptr);
429 if (ret)
430 HDF_LOGE("mpi control enc set ref cfg failed ret %d\n", ret);
431 }
432
433 HDF_LOGE("leave ctx %p ret %d\n", ctx, ret);
434
435 return ret;
436 }
437