1 /*
2 * Copyright 2020 Rockchip Electronics Co. LTD
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define MODULE_TAG "vpu_api_mlvec"
18
19 #include <fcntl.h>
20 #include "string.h"
21
22 #include "mpp_mem.h"
23 #include "mpp_debug.h"
24 #include "mpp_common.h"
25
26 #include "vpu_api_mlvec.h"
27 #include "vpu_api_legacy.h"
28
29 #define VPU_API_DBG_MLVEC_FUNC (0x00010000)
30 #define VPU_API_DBG_MLVEC_FLOW (0x00020000)
31
32 #define mlvec_dbg_func(fmt, ...) vpu_api_dbg_f(VPU_API_DBG_MLVEC_FUNC, fmt, ## __VA_ARGS__)
33 #define mlvec_dbg_flow(fmt, ...) vpu_api_dbg_f(VPU_API_DBG_MLVEC_FLOW, fmt, ## __VA_ARGS__)
34
/*
 * Internal MLVEC (multi-layer video encoding control) context.
 * The mpp / mpi / enc_cfg handles are borrowed from the caller via
 * vpu_api_mlvec_setup() and are NOT owned or released by this module.
 */
typedef struct VpuApiMlvecImpl_t {
    MppCtx mpp;                     /* borrowed encoder context */
    MppApi *mpi;                    /* borrowed mpp api vtable */
    MppEncCfg enc_cfg;              /* borrowed encoder config handle */

    VpuApiMlvecStaticCfg st_cfg;    /* cached static cfg (header mode, prefix, slice, ltr) */
    VpuApiMlvecDynamicCfg dy_cfg;   /* cached per-frame dynamic cfg (ltr mark/use, qp, blpid) */
} VpuApiMlvecImpl;
43
vpu_api_mlvec_init(VpuApiMlvec * ctx)44 MPP_RET vpu_api_mlvec_init(VpuApiMlvec *ctx)
45 {
46 if (NULL == ctx) {
47 mpp_err_f("invalid NULL input\n");
48 return MPP_ERR_NULL_PTR;
49 }
50
51 mlvec_dbg_func("enter %p\n", ctx);
52
53 VpuApiMlvecImpl *impl = mpp_calloc(VpuApiMlvecImpl, 1);
54 if (NULL == impl)
55 mpp_err_f("failed to create MLVEC context\n");
56
57 mpp_assert(sizeof(VpuApiMlvecStaticCfg) == sizeof(EncParameter_t));
58 /* default disable frame_qp setup */
59 impl->dy_cfg.frame_qp = -1;
60
61 *ctx = impl;
62
63 mlvec_dbg_func("leave %p %p\n", ctx, impl);
64 return (impl) ? (MPP_OK) : (MPP_NOK);
65 }
66
vpu_api_mlvec_deinit(VpuApiMlvec ctx)67 MPP_RET vpu_api_mlvec_deinit(VpuApiMlvec ctx)
68 {
69 mlvec_dbg_func("enter %p\n", ctx);
70 MPP_FREE(ctx);
71 mlvec_dbg_func("leave %p\n", ctx);
72 return MPP_OK;
73 }
74
vpu_api_mlvec_setup(VpuApiMlvec ctx,MppCtx mpp,MppApi * mpi,MppEncCfg enc_cfg)75 MPP_RET vpu_api_mlvec_setup(VpuApiMlvec ctx, MppCtx mpp, MppApi *mpi, MppEncCfg enc_cfg)
76 {
77 if (NULL == ctx || NULL == mpp || NULL == mpi || NULL == enc_cfg) {
78 mpp_err_f("invalid NULL input ctx %p mpp %p mpi %p cfg %p\n",
79 ctx, mpp, mpi, enc_cfg);
80 return MPP_ERR_NULL_PTR;
81 }
82
83 mlvec_dbg_func("enter %p\n", ctx);
84
85 VpuApiMlvecImpl *impl = (VpuApiMlvecImpl *)ctx;
86 impl->mpp = mpp;
87 impl->mpi = mpi;
88 impl->enc_cfg = enc_cfg;
89
90 mlvec_dbg_func("leave %p\n", ctx);
91
92 return MPP_OK;
93 }
94
vpu_api_mlvec_check_cfg(void * p)95 MPP_RET vpu_api_mlvec_check_cfg(void *p)
96 {
97 if (NULL == p) {
98 mpp_err_f("invalid NULL input\n");
99 return MPP_ERR_NULL_PTR;
100 }
101
102 VpuApiMlvecStaticCfg *cfg = (VpuApiMlvecStaticCfg *)p;
103 RK_U32 magic = cfg->magic;
104 MPP_RET ret = MPP_OK;
105
106 if ((((magic >> 24) & 0xff) != MLVEC_MAGIC) ||
107 (((magic >> 16) & 0xff) != MLVEC_VERSION))
108 ret = MPP_NOK;
109
110 mlvec_dbg_flow("check mlvec cfg magic %08x %s\n", magic,
111 (ret == MPP_OK) ? "success" : "failed");
112
113 return ret;
114 }
115
vpu_api_mlvec_set_st_cfg(VpuApiMlvec ctx,VpuApiMlvecStaticCfg * cfg)116 MPP_RET vpu_api_mlvec_set_st_cfg(VpuApiMlvec ctx, VpuApiMlvecStaticCfg *cfg)
117 {
118 if (NULL == ctx || NULL == cfg) {
119 mpp_err_f("invalid NULL input ctx %p cfg %p\n");
120 return MPP_ERR_NULL_PTR;
121 }
122
123 mlvec_dbg_func("enter ctx %p cfg %p\n", ctx, cfg);
124
125 /* check mlvec magic word */
126 if (vpu_api_mlvec_check_cfg(cfg))
127 return MPP_NOK;
128
129 MPP_RET ret = MPP_OK;
130 /* update static configure */
131 VpuApiMlvecImpl *impl = (VpuApiMlvecImpl *)ctx;
132
133 memcpy(&impl->st_cfg, cfg, sizeof(impl->st_cfg));
134 cfg = &impl->st_cfg;
135
136 /* get mpp context and check */
137 MppCtx mpp_ctx = impl->mpp;
138 MppApi *mpi = impl->mpi;
139 MppEncCfg enc_cfg = impl->enc_cfg;
140
141 mpp_assert(mpp_ctx);
142 mpp_assert(mpi);
143 mpp_assert(enc_cfg);
144
145 /* start control mpp */
146 mlvec_dbg_flow("hdr_on_idr %d\n", cfg->hdr_on_idr);
147 MppEncHeaderMode mode = cfg->hdr_on_idr ?
148 MPP_ENC_HEADER_MODE_EACH_IDR :
149 MPP_ENC_HEADER_MODE_DEFAULT;
150
151 ret = mpi->control(mpp_ctx, MPP_ENC_SET_HEADER_MODE, &mode);
152 if (ret)
153 mpp_err("setup enc header mode %d failed ret %d\n", mode, ret);
154
155 mlvec_dbg_flow("add_prefix %d\n", cfg->add_prefix);
156 mpp_enc_cfg_set_s32(enc_cfg, "h264:prefix_mode", cfg->add_prefix);
157
158 mlvec_dbg_flow("slice_mbs %d\n", cfg->slice_mbs);
159 if (cfg->slice_mbs) {
160 mpp_enc_cfg_set_u32(enc_cfg, "split:mode", MPP_ENC_SPLIT_BY_CTU);
161 mpp_enc_cfg_set_u32(enc_cfg, "split:arg", cfg->slice_mbs);
162 } else
163 mpp_enc_cfg_set_u32(enc_cfg, "split:mode", MPP_ENC_SPLIT_NONE);
164
165 /* NOTE: ltr_frames is already configured */
166 vpu_api_mlvec_set_dy_max_tid(ctx, cfg->max_tid);
167
168 mlvec_dbg_func("leave ctx %p ret %d\n", ctx, ret);
169
170 return ret;
171 }
172
vpu_api_mlvec_set_dy_cfg(VpuApiMlvec ctx,VpuApiMlvecDynamicCfg * cfg,MppMeta meta)173 MPP_RET vpu_api_mlvec_set_dy_cfg(VpuApiMlvec ctx, VpuApiMlvecDynamicCfg *cfg, MppMeta meta)
174 {
175 if (NULL == ctx || NULL == cfg || NULL == meta) {
176 mpp_err_f("invalid NULL input ctx %p cfg %p meta %p\n",
177 ctx, cfg, meta);
178 return MPP_ERR_NULL_PTR;
179 }
180
181 mlvec_dbg_func("enter ctx %p cfg %p meta %p\n", ctx, cfg, meta);
182
183 MPP_RET ret = MPP_OK;
184 VpuApiMlvecImpl *impl = (VpuApiMlvecImpl *)ctx;
185 VpuApiMlvecDynamicCfg *dst = &impl->dy_cfg;
186
187 /* clear non-sticky flag first */
188 dst->mark_ltr = -1;
189 dst->use_ltr = -1;
190 /* frame qp and base layer pid is sticky flag */
191
192 /* update flags */
193 if (cfg->updated) {
194 if (cfg->updated & VPU_API_ENC_MARK_LTR_UPDATED)
195 dst->mark_ltr = cfg->mark_ltr;
196
197 if (cfg->updated & VPU_API_ENC_USE_LTR_UPDATED)
198 dst->use_ltr = cfg->use_ltr;
199
200 if (cfg->updated & VPU_API_ENC_FRAME_QP_UPDATED)
201 dst->frame_qp = cfg->frame_qp;
202
203 if (cfg->updated & VPU_API_ENC_BASE_PID_UPDATED)
204 dst->base_layer_pid = cfg->base_layer_pid;
205
206 /* dynamic max temporal layer count updated go through mpp ref cfg */
207 cfg->updated = 0;
208 }
209
210 mlvec_dbg_flow("ltr mark %2d use %2d frm qp %2d blpid %d\n", dst->mark_ltr,
211 dst->use_ltr, dst->frame_qp, dst->base_layer_pid);
212
213 /* setup next frame configure */
214 if (dst->mark_ltr >= 0)
215 mpp_meta_set_s32(meta, KEY_ENC_MARK_LTR, dst->mark_ltr);
216
217 if (dst->use_ltr >= 0)
218 mpp_meta_set_s32(meta, KEY_ENC_USE_LTR, dst->use_ltr);
219
220 if (dst->frame_qp >= 0)
221 mpp_meta_set_s32(meta, KEY_ENC_FRAME_QP, dst->frame_qp);
222
223 if (dst->base_layer_pid >= 0)
224 mpp_meta_set_s32(meta, KEY_ENC_BASE_LAYER_PID, dst->base_layer_pid);
225
226 mlvec_dbg_func("leave ctx %p ret %d\n", ctx, ret);
227
228 return ret;
229 }
230
/*
 * Rebuild the encoder reference structure for a new maximum temporal
 * layer id, combined with the long-term reference (LTR) frame count
 * cached in the static config.
 *
 * max_tid 0 -> single layer (no TSVC), 1 -> tsvc2 (2 layers),
 * 2 -> tsvc3 (3 layers), 3 -> tsvc4 (4 layers); any other value logs
 * an error and clears the reference config (SET_REF_CFG with NULL).
 *
 * Returns MPP_OK on success or the error from the last failing call.
 */
MPP_RET vpu_api_mlvec_set_dy_max_tid(VpuApiMlvec ctx, RK_S32 max_tid)
{
    if (NULL == ctx) {
        mpp_err_f("invalid NULL input\n");
        return MPP_ERR_NULL_PTR;
    }

    mlvec_dbg_func("enter ctx %p max_tid %d\n", ctx, max_tid);

    MPP_RET ret = MPP_OK;
    VpuApiMlvecImpl *impl = (VpuApiMlvecImpl *)ctx;
    MppCtx mpp_ctx = impl->mpp;
    MppApi *mpi = impl->mpi;
    MppEncCfg enc_cfg = impl->enc_cfg;

    /* setup must have been called before any dynamic reconfig */
    mpp_assert(mpp_ctx);
    mpp_assert(mpi);
    mpp_assert(enc_cfg);

    MppEncRefLtFrmCfg lt_ref[16];
    MppEncRefStFrmCfg st_ref[16];
    RK_S32 lt_cfg_cnt = 0;          /* number of valid entries in lt_ref */
    RK_S32 st_cfg_cnt = 0;          /* number of valid entries in st_ref */
    RK_S32 tid0_loop = 0;           /* gop slots between two tid-0 frames */
    RK_S32 ltr_frames = impl->st_cfg.ltr_frames;

    memset(lt_ref, 0, sizeof(lt_ref));
    memset(st_ref, 0, sizeof(st_ref));

    mlvec_dbg_flow("ltr_frames %d\n", ltr_frames);
    mlvec_dbg_flow("max_tid %d\n", max_tid);

    /* Each st_ref[i] describes one slot of the repeating gop pattern:
     * its temporal id, whether it may be referenced, and which earlier
     * frame it predicts from. */
    switch (max_tid) {
    case 0 : {
        /* single-layer: every frame refs the previous reference frame */
        st_ref[0].is_non_ref = 0;
        st_ref[0].temporal_id = 0;
        st_ref[0].ref_mode = REF_TO_PREV_REF_FRM;
        st_ref[0].ref_arg = 0;
        st_ref[0].repeat = 0;

        st_cfg_cnt = 1;
        tid0_loop = 1;
        mlvec_dbg_flow("no tsvc\n");
    } break;
    case 1 : {
        /* set tsvc2 st-ref struct: pattern 0,1,0 */
        /* st 0 layer 0 - ref */
        st_ref[0].is_non_ref = 0;
        st_ref[0].temporal_id = 0;
        st_ref[0].ref_mode = REF_TO_PREV_REF_FRM;
        st_ref[0].ref_arg = 0;
        st_ref[0].repeat = 0;
        /* st 1 layer 1 - non-ref */
        st_ref[1].is_non_ref = 1;
        st_ref[1].temporal_id = 1;
        st_ref[1].ref_mode = REF_TO_PREV_REF_FRM;
        st_ref[1].ref_arg = 0;
        st_ref[1].repeat = 0;
        /* st 2 layer 0 - ref */
        st_ref[2].is_non_ref = 0;
        st_ref[2].temporal_id = 0;
        st_ref[2].ref_mode = REF_TO_PREV_REF_FRM;
        st_ref[2].ref_arg = 0;
        st_ref[2].repeat = 0;

        st_cfg_cnt = 3;
        tid0_loop = 2;
        mlvec_dbg_flow("tsvc2\n");
    } break;
    case 2 : {
        /* set tsvc3 st-ref struct: pattern 0,2,1,2,0.
         * NOTE(review): all slots here have is_non_ref = 0 (referencable)
         * and predict by temporal layer (ref_arg = target layer id). */
        /* st 0 layer 0 - refs layer 0 */
        st_ref[0].is_non_ref = 0;
        st_ref[0].temporal_id = 0;
        st_ref[0].ref_mode = REF_TO_TEMPORAL_LAYER;
        st_ref[0].ref_arg = 0;
        st_ref[0].repeat = 0;
        /* st 1 layer 2 - refs layer 0 */
        st_ref[1].is_non_ref = 0;
        st_ref[1].temporal_id = 2;
        st_ref[1].ref_mode = REF_TO_TEMPORAL_LAYER;
        st_ref[1].ref_arg = 0;
        st_ref[1].repeat = 0;
        /* st 2 layer 1 - refs layer 0 */
        st_ref[2].is_non_ref = 0;
        st_ref[2].temporal_id = 1;
        st_ref[2].ref_mode = REF_TO_TEMPORAL_LAYER;
        st_ref[2].ref_arg = 0;
        st_ref[2].repeat = 0;
        /* st 3 layer 2 - refs layer 1 */
        st_ref[3].is_non_ref = 0;
        st_ref[3].temporal_id = 2;
        st_ref[3].ref_mode = REF_TO_TEMPORAL_LAYER;
        st_ref[3].ref_arg = 1;
        st_ref[3].repeat = 0;
        /* st 4 layer 0 - refs layer 0 */
        st_ref[4].is_non_ref = 0;
        st_ref[4].temporal_id = 0;
        st_ref[4].ref_mode = REF_TO_TEMPORAL_LAYER;
        st_ref[4].ref_arg = 0;
        st_ref[4].repeat = 0;

        st_cfg_cnt = 5;
        tid0_loop = 4;
        mlvec_dbg_flow("tsvc3\n");
    } break;
    case 3 : {
        /* set tsvc4 st-ref struct: pattern 0,3,2,3,1,3,2,3,0 */
        /* st 0 layer 0 - ref */
        st_ref[0].is_non_ref = 0;
        st_ref[0].temporal_id = 0;
        st_ref[0].ref_mode = REF_TO_TEMPORAL_LAYER;
        st_ref[0].ref_arg = 0;
        st_ref[0].repeat = 0;
        /* st 1 layer 3 - non-ref */
        st_ref[1].is_non_ref = 1;
        st_ref[1].temporal_id = 3;
        st_ref[1].ref_mode = REF_TO_PREV_REF_FRM;
        st_ref[1].ref_arg = 0;
        st_ref[1].repeat = 0;
        /* st 2 layer 2 - ref */
        st_ref[2].is_non_ref = 0;
        st_ref[2].temporal_id = 2;
        st_ref[2].ref_mode = REF_TO_PREV_REF_FRM;
        st_ref[2].ref_arg = 0;
        st_ref[2].repeat = 0;
        /* st 3 layer 3 - non-ref */
        st_ref[3].is_non_ref = 1;
        st_ref[3].temporal_id = 3;
        st_ref[3].ref_mode = REF_TO_PREV_REF_FRM;
        st_ref[3].ref_arg = 0;
        st_ref[3].repeat = 0;
        /* st 4 layer 1 - ref, predicts from layer 0 */
        st_ref[4].is_non_ref = 0;
        st_ref[4].temporal_id = 1;
        st_ref[4].ref_mode = REF_TO_TEMPORAL_LAYER;
        st_ref[4].ref_arg = 0;
        st_ref[4].repeat = 0;
        /* st 5 layer 3 - non-ref */
        st_ref[5].is_non_ref = 1;
        st_ref[5].temporal_id = 3;
        st_ref[5].ref_mode = REF_TO_PREV_REF_FRM;
        st_ref[5].ref_arg = 0;
        st_ref[5].repeat = 0;
        /* st 6 layer 2 - ref */
        st_ref[6].is_non_ref = 0;
        st_ref[6].temporal_id = 2;
        st_ref[6].ref_mode = REF_TO_PREV_REF_FRM;
        st_ref[6].ref_arg = 0;
        st_ref[6].repeat = 0;
        /* st 7 layer 3 - non-ref */
        st_ref[7].is_non_ref = 1;
        st_ref[7].temporal_id = 3;
        st_ref[7].ref_mode = REF_TO_PREV_REF_FRM;
        st_ref[7].ref_arg = 0;
        st_ref[7].repeat = 0;
        /* st 8 layer 0 - ref */
        st_ref[8].is_non_ref = 0;
        st_ref[8].temporal_id = 0;
        st_ref[8].ref_mode = REF_TO_PREV_REF_FRM;
        st_ref[8].ref_arg = 0;
        st_ref[8].repeat = 0;

        st_cfg_cnt = 9;
        tid0_loop = 8;
        mlvec_dbg_flow("tsvc4\n");
    } break;
    default : {
        /* unsupported layer id: st_cfg_cnt stays 0 and the reference
         * config is reset below */
        mpp_err("invalid max temporal layer id %d\n", max_tid);
    } break;
    }

    if (ltr_frames) {
        RK_S32 i;

        /* one LTR slot per requested frame; spread them one tid-0
         * gop apart so each gop marks a different LTR index */
        lt_cfg_cnt = ltr_frames;
        mpp_assert(ltr_frames <= MPP_ENC_MAX_LT_REF_NUM);
        for (i = 0; i < ltr_frames; i++) {
            lt_ref[i].lt_idx = i;
            lt_ref[i].temporal_id = 0;
            lt_ref[i].ref_mode = REF_TO_PREV_LT_REF;
            lt_ref[i].lt_gap = 0;
            lt_ref[i].lt_delay = tid0_loop * i;
        }
    }

    /* LTR config is only meaningful on top of a short-term structure */
    if (lt_cfg_cnt)
        mpp_assert(st_cfg_cnt);

    mlvec_dbg_flow("lt_cfg_cnt %d st_cfg_cnt %d\n", lt_cfg_cnt, st_cfg_cnt);
    if (lt_cfg_cnt || st_cfg_cnt) {
        MppEncRefCfg ref = NULL;

        mpp_enc_ref_cfg_init(&ref);

        /* NOTE(review): each call below overwrites ret, so only the
         * last failure (check / control) is reported to the caller */
        ret = mpp_enc_ref_cfg_set_cfg_cnt(ref, lt_cfg_cnt, st_cfg_cnt);
        ret = mpp_enc_ref_cfg_add_lt_cfg(ref, lt_cfg_cnt, lt_ref);
        ret = mpp_enc_ref_cfg_add_st_cfg(ref, st_cfg_cnt, st_ref);
        ret = mpp_enc_ref_cfg_set_keep_cpb(ref, 1);
        ret = mpp_enc_ref_cfg_check(ref);

        ret = mpi->control(mpp_ctx, MPP_ENC_SET_REF_CFG, ref);
        if (ret)
            mpp_err("mpi control enc set ref cfg failed ret %d\n", ret);

        mpp_enc_ref_cfg_deinit(&ref);
    } else {
        /* no structure requested: reset encoder to default references */
        ret = mpi->control(mpp_ctx, MPP_ENC_SET_REF_CFG, NULL);
        if (ret)
            mpp_err("mpi control enc set ref cfg failed ret %d\n", ret);
    }

    mlvec_dbg_func("leave ctx %p ret %d\n", ctx, ret);

    return ret;
}
447