1 /*
2 * Copyright 2020 Rockchip Electronics Co. LTD
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define MODULE_TAG "vpu_api_mlvec"
18
19 #include "string.h"
20
21 #include "mpp_mem.h"
22 #include "mpp_debug.h"
23 #include "mpp_common.h"
24
25 #include "vpu_api_mlvec.h"
26 #include "vpu_api_legacy.h"
27
28 #define VPU_API_DBG_MLVEC_FUNC (0x00010000)
29 #define VPU_API_DBG_MLVEC_FLOW (0x00020000)
30
31 #define mlvec_dbg_func(fmt, ...) vpu_api_dbg_f(VPU_API_DBG_MLVEC_FUNC, fmt, ## __VA_ARGS__)
32 #define mlvec_dbg_flow(fmt, ...) vpu_api_dbg_f(VPU_API_DBG_MLVEC_FLOW, fmt, ## __VA_ARGS__)
33
/*
 * MLVEC (multi-layer video encoder control) context.
 * Created by vpu_api_mlvec_init(), bound to an encoder instance by
 * vpu_api_mlvec_setup(), released by vpu_api_mlvec_deinit().
 */
typedef struct VpuApiMlvecImpl_t {
    /* encoder handles cached by vpu_api_mlvec_setup(); not owned here */
    MppCtx mpp;
    MppApi *mpi;
    MppEncCfg enc_cfg;

    /* last accepted static config (header mode, prefix, slice, layers) */
    VpuApiMlvecStaticCfg st_cfg;
    /* per-frame dynamic config (ltr mark/use, frame qp, base layer pid) */
    VpuApiMlvecDynamicCfg dy_cfg;
} VpuApiMlvecImpl;
42
vpu_api_mlvec_init(VpuApiMlvec * ctx)43 MPP_RET vpu_api_mlvec_init(VpuApiMlvec *ctx)
44 {
45 if (NULL == ctx) {
46 mpp_err_f("invalid NULL input\n");
47 return MPP_ERR_NULL_PTR;
48 }
49
50 mlvec_dbg_func("enter %p\n", ctx);
51
52 VpuApiMlvecImpl *impl = mpp_calloc(VpuApiMlvecImpl, 1);
53 if (NULL == impl)
54 mpp_err_f("failed to create MLVEC context\n");
55
56 mpp_assert(sizeof(VpuApiMlvecStaticCfg) == sizeof(EncParameter_t));
57 /* default disable frame_qp setup */
58 impl->dy_cfg.frame_qp = -1;
59
60 *ctx = impl;
61
62 mlvec_dbg_func("leave %p %p\n", ctx, impl);
63 return (impl) ? (MPP_OK) : (MPP_NOK);
64 }
65
/*
 * Release an MLVEC context created by vpu_api_mlvec_init().
 * Safe to call with NULL (MPP_FREE tolerates it). Always returns MPP_OK.
 * NOTE: the cached mpp/mpi/enc_cfg handles are not owned and not released.
 */
MPP_RET vpu_api_mlvec_deinit(VpuApiMlvec ctx)
{
    mlvec_dbg_func("enter %p\n", ctx);
    MPP_FREE(ctx);
    mlvec_dbg_func("leave %p\n", ctx);
    return MPP_OK;
}
73
vpu_api_mlvec_setup(VpuApiMlvec ctx,MppCtx mpp,MppApi * mpi,MppEncCfg enc_cfg)74 MPP_RET vpu_api_mlvec_setup(VpuApiMlvec ctx, MppCtx mpp, MppApi *mpi, MppEncCfg enc_cfg)
75 {
76 if (NULL == ctx || NULL == mpp || NULL == mpi || NULL == enc_cfg) {
77 mpp_err_f("invalid NULL input ctx %p mpp %p mpi %p cfg %p\n",
78 ctx, mpp, mpi, enc_cfg);
79 return MPP_ERR_NULL_PTR;
80 }
81
82 mlvec_dbg_func("enter %p\n", ctx);
83
84 VpuApiMlvecImpl *impl = (VpuApiMlvecImpl *)ctx;
85 impl->mpp = mpp;
86 impl->mpi = mpi;
87 impl->enc_cfg = enc_cfg;
88
89 mlvec_dbg_func("leave %p\n", ctx);
90
91 return MPP_OK;
92 }
93
vpu_api_mlvec_check_cfg(void * p)94 MPP_RET vpu_api_mlvec_check_cfg(void *p)
95 {
96 if (NULL == p) {
97 mpp_err_f("invalid NULL input\n");
98 return MPP_ERR_NULL_PTR;
99 }
100
101 VpuApiMlvecStaticCfg *cfg = (VpuApiMlvecStaticCfg *)p;
102 RK_U32 magic = cfg->magic;
103 MPP_RET ret = MPP_OK;
104
105 if ((((magic >> 24) & 0xff) != MLVEC_MAGIC) ||
106 (((magic >> 16) & 0xff) != MLVEC_VERSION))
107 ret = MPP_NOK;
108
109 mlvec_dbg_flow("check mlvec cfg magic %08x %s\n", magic,
110 (ret == MPP_OK) ? "success" : "failed");
111
112 return ret;
113 }
114
vpu_api_mlvec_set_st_cfg(VpuApiMlvec ctx,VpuApiMlvecStaticCfg * cfg)115 MPP_RET vpu_api_mlvec_set_st_cfg(VpuApiMlvec ctx, VpuApiMlvecStaticCfg *cfg)
116 {
117 if (NULL == ctx || NULL == cfg) {
118 mpp_err_f("invalid NULL input ctx %p cfg %p\n");
119 return MPP_ERR_NULL_PTR;
120 }
121
122 mlvec_dbg_func("enter ctx %p cfg %p\n", ctx, cfg);
123
124 /* check mlvec magic word */
125 if (vpu_api_mlvec_check_cfg(cfg))
126 return MPP_NOK;
127
128 MPP_RET ret = MPP_OK;
129 /* update static configure */
130 VpuApiMlvecImpl *impl = (VpuApiMlvecImpl *)ctx;
131
132 memcpy(&impl->st_cfg, cfg, sizeof(impl->st_cfg));
133 cfg = &impl->st_cfg;
134
135 /* get mpp context and check */
136 MppCtx mpp_ctx = impl->mpp;
137 MppApi *mpi = impl->mpi;
138 MppEncCfg enc_cfg = impl->enc_cfg;
139
140 mpp_assert(mpp_ctx);
141 mpp_assert(mpi);
142 mpp_assert(enc_cfg);
143
144 /* start control mpp */
145 mlvec_dbg_flow("hdr_on_idr %d\n", cfg->hdr_on_idr);
146 MppEncHeaderMode mode = cfg->hdr_on_idr ?
147 MPP_ENC_HEADER_MODE_EACH_IDR :
148 MPP_ENC_HEADER_MODE_DEFAULT;
149
150 ret = mpi->control(mpp_ctx, MPP_ENC_SET_HEADER_MODE, &mode);
151 if (ret)
152 mpp_err("setup enc header mode %d failed ret %d\n", mode, ret);
153
154 mlvec_dbg_flow("add_prefix %d\n", cfg->add_prefix);
155 mpp_enc_cfg_set_s32(enc_cfg, "h264:prefix_mode", cfg->add_prefix);
156
157 mlvec_dbg_flow("slice_mbs %d\n", cfg->slice_mbs);
158 if (cfg->slice_mbs) {
159 mpp_enc_cfg_set_u32(enc_cfg, "split:mode", MPP_ENC_SPLIT_BY_CTU);
160 mpp_enc_cfg_set_u32(enc_cfg, "split:arg", cfg->slice_mbs);
161 } else
162 mpp_enc_cfg_set_u32(enc_cfg, "split:mode", MPP_ENC_SPLIT_NONE);
163
164 /* NOTE: ltr_frames is already configured */
165 vpu_api_mlvec_set_dy_max_tid(ctx, cfg->max_tid);
166
167 mlvec_dbg_func("leave ctx %p ret %d\n", ctx, ret);
168
169 return ret;
170 }
171
vpu_api_mlvec_set_dy_cfg(VpuApiMlvec ctx,VpuApiMlvecDynamicCfg * cfg,MppMeta meta)172 MPP_RET vpu_api_mlvec_set_dy_cfg(VpuApiMlvec ctx, VpuApiMlvecDynamicCfg *cfg, MppMeta meta)
173 {
174 if (NULL == ctx || NULL == cfg || NULL == meta) {
175 mpp_err_f("invalid NULL input ctx %p cfg %p meta %p\n",
176 ctx, cfg, meta);
177 return MPP_ERR_NULL_PTR;
178 }
179
180 mlvec_dbg_func("enter ctx %p cfg %p meta %p\n", ctx, cfg, meta);
181
182 MPP_RET ret = MPP_OK;
183 VpuApiMlvecImpl *impl = (VpuApiMlvecImpl *)ctx;
184 VpuApiMlvecDynamicCfg *dst = &impl->dy_cfg;
185
186 /* clear non-sticky flag first */
187 dst->mark_ltr = -1;
188 dst->use_ltr = -1;
189 /* frame qp and base layer pid is sticky flag */
190
191 /* update flags */
192 if (cfg->updated) {
193 if (cfg->updated & VPU_API_ENC_MARK_LTR_UPDATED)
194 dst->mark_ltr = cfg->mark_ltr;
195
196 if (cfg->updated & VPU_API_ENC_USE_LTR_UPDATED)
197 dst->use_ltr = cfg->use_ltr;
198
199 if (cfg->updated & VPU_API_ENC_FRAME_QP_UPDATED)
200 dst->frame_qp = cfg->frame_qp;
201
202 if (cfg->updated & VPU_API_ENC_BASE_PID_UPDATED)
203 dst->base_layer_pid = cfg->base_layer_pid;
204
205 /* dynamic max temporal layer count updated go through mpp ref cfg */
206 cfg->updated = 0;
207 }
208
209 mlvec_dbg_flow("ltr mark %2d use %2d frm qp %2d blpid %d\n", dst->mark_ltr,
210 dst->use_ltr, dst->frame_qp, dst->base_layer_pid);
211
212 /* setup next frame configure */
213 if (dst->mark_ltr >= 0)
214 mpp_meta_set_s32(meta, KEY_ENC_MARK_LTR, dst->mark_ltr);
215
216 if (dst->use_ltr >= 0)
217 mpp_meta_set_s32(meta, KEY_ENC_USE_LTR, dst->use_ltr);
218
219 if (dst->frame_qp >= 0)
220 mpp_meta_set_s32(meta, KEY_ENC_FRAME_QP, dst->frame_qp);
221
222 if (dst->base_layer_pid >= 0)
223 mpp_meta_set_s32(meta, KEY_ENC_BASE_LAYER_PID, dst->base_layer_pid);
224
225 mlvec_dbg_func("leave ctx %p ret %d\n", ctx, ret);
226
227 return ret;
228 }
229
vpu_api_mlvec_set_dy_max_tid(VpuApiMlvec ctx,RK_S32 max_tid)230 MPP_RET vpu_api_mlvec_set_dy_max_tid(VpuApiMlvec ctx, RK_S32 max_tid)
231 {
232 if (NULL == ctx) {
233 mpp_err_f("invalid NULL input\n");
234 return MPP_ERR_NULL_PTR;
235 }
236
237 mlvec_dbg_func("enter ctx %p max_tid %d\n", ctx, max_tid);
238
239 MPP_RET ret = MPP_OK;
240 VpuApiMlvecImpl *impl = (VpuApiMlvecImpl *)ctx;
241 MppCtx mpp_ctx = impl->mpp;
242 MppApi *mpi = impl->mpi;
243 MppEncCfg enc_cfg = impl->enc_cfg;
244
245 mpp_assert(mpp_ctx);
246 mpp_assert(mpi);
247 mpp_assert(enc_cfg);
248
249 MppEncRefLtFrmCfg lt_ref[16];
250 MppEncRefStFrmCfg st_ref[16];
251 RK_S32 lt_cfg_cnt = 0;
252 RK_S32 st_cfg_cnt = 0;
253 RK_S32 tid0_loop = 0;
254 RK_S32 ltr_frames = impl->st_cfg.ltr_frames;
255
256 memset(lt_ref, 0, sizeof(lt_ref));
257 memset(st_ref, 0, sizeof(st_ref));
258
259 mlvec_dbg_flow("ltr_frames %d\n", ltr_frames);
260 mlvec_dbg_flow("max_tid %d\n", max_tid);
261
262 switch (max_tid) {
263 case 0 : {
264 st_ref[0].is_non_ref = 0;
265 st_ref[0].temporal_id = 0;
266 st_ref[0].ref_mode = REF_TO_PREV_REF_FRM;
267 st_ref[0].ref_arg = 0;
268 st_ref[0].repeat = 0;
269
270 st_cfg_cnt = 1;
271 tid0_loop = 1;
272 mlvec_dbg_flow("no tsvc\n");
273 } break;
274 case 1 : {
275 /* set tsvc2 st-ref struct */
276 /* st 0 layer 0 - ref */
277 st_ref[0].is_non_ref = 0;
278 st_ref[0].temporal_id = 0;
279 st_ref[0].ref_mode = REF_TO_PREV_REF_FRM;
280 st_ref[0].ref_arg = 0;
281 st_ref[0].repeat = 0;
282 /* st 1 layer 1 - non-ref */
283 st_ref[1].is_non_ref = 1;
284 st_ref[1].temporal_id = 1;
285 st_ref[1].ref_mode = REF_TO_PREV_REF_FRM;
286 st_ref[1].ref_arg = 0;
287 st_ref[1].repeat = 0;
288 /* st 2 layer 0 - ref */
289 st_ref[2].is_non_ref = 0;
290 st_ref[2].temporal_id = 0;
291 st_ref[2].ref_mode = REF_TO_PREV_REF_FRM;
292 st_ref[2].ref_arg = 0;
293 st_ref[2].repeat = 0;
294
295 st_cfg_cnt = 3;
296 tid0_loop = 2;
297 mlvec_dbg_flow("tsvc2\n");
298 } break;
299 case 2 : {
300 /* set tsvc3 st-ref struct */
301 /* st 0 layer 0 - ref */
302 st_ref[0].is_non_ref = 0;
303 st_ref[0].temporal_id = 0;
304 st_ref[0].ref_mode = REF_TO_TEMPORAL_LAYER;
305 st_ref[0].ref_arg = 0;
306 st_ref[0].repeat = 0;
307 /* st 1 layer 2 - non-ref */
308 st_ref[1].is_non_ref = 0;
309 st_ref[1].temporal_id = 2;
310 st_ref[1].ref_mode = REF_TO_TEMPORAL_LAYER;
311 st_ref[1].ref_arg = 0;
312 st_ref[1].repeat = 0;
313 /* st 2 layer 1 - ref */
314 st_ref[2].is_non_ref = 0;
315 st_ref[2].temporal_id = 1;
316 st_ref[2].ref_mode = REF_TO_TEMPORAL_LAYER;
317 st_ref[2].ref_arg = 0;
318 st_ref[2].repeat = 0;
319 /* st 3 layer 2 - non-ref */
320 st_ref[3].is_non_ref = 0;
321 st_ref[3].temporal_id = 2;
322 st_ref[3].ref_mode = REF_TO_TEMPORAL_LAYER;
323 st_ref[3].ref_arg = 1;
324 st_ref[3].repeat = 0;
325 /* st 4 layer 0 - ref */
326 st_ref[4].is_non_ref = 0;
327 st_ref[4].temporal_id = 0;
328 st_ref[4].ref_mode = REF_TO_TEMPORAL_LAYER;
329 st_ref[4].ref_arg = 0;
330 st_ref[4].repeat = 0;
331
332 st_cfg_cnt = 5;
333 tid0_loop = 4;
334 mlvec_dbg_flow("tsvc3\n");
335 } break;
336 case 3 : {
337 /* set tsvc3 st-ref struct */
338 /* st 0 layer 0 - ref */
339 st_ref[0].is_non_ref = 0;
340 st_ref[0].temporal_id = 0;
341 st_ref[0].ref_mode = REF_TO_TEMPORAL_LAYER;
342 st_ref[0].ref_arg = 0;
343 st_ref[0].repeat = 0;
344 /* st 1 layer 3 - non-ref */
345 st_ref[1].is_non_ref = 1;
346 st_ref[1].temporal_id = 3;
347 st_ref[1].ref_mode = REF_TO_PREV_REF_FRM;
348 st_ref[1].ref_arg = 0;
349 st_ref[1].repeat = 0;
350 /* st 2 layer 2 - ref */
351 st_ref[2].is_non_ref = 0;
352 st_ref[2].temporal_id = 2;
353 st_ref[2].ref_mode = REF_TO_PREV_REF_FRM;
354 st_ref[2].ref_arg = 0;
355 st_ref[2].repeat = 0;
356 /* st 3 layer 3 - non-ref */
357 st_ref[3].is_non_ref = 1;
358 st_ref[3].temporal_id = 3;
359 st_ref[3].ref_mode = REF_TO_PREV_REF_FRM;
360 st_ref[3].ref_arg = 0;
361 st_ref[3].repeat = 0;
362 /* st 4 layer 1 - ref */
363 st_ref[4].is_non_ref = 0;
364 st_ref[4].temporal_id = 1;
365 st_ref[4].ref_mode = REF_TO_TEMPORAL_LAYER;
366 st_ref[4].ref_arg = 0;
367 st_ref[4].repeat = 0;
368 /* st 5 layer 3 - non-ref */
369 st_ref[5].is_non_ref = 1;
370 st_ref[5].temporal_id = 3;
371 st_ref[5].ref_mode = REF_TO_PREV_REF_FRM;
372 st_ref[5].ref_arg = 0;
373 st_ref[5].repeat = 0;
374 /* st 6 layer 2 - ref */
375 st_ref[6].is_non_ref = 0;
376 st_ref[6].temporal_id = 2;
377 st_ref[6].ref_mode = REF_TO_PREV_REF_FRM;
378 st_ref[6].ref_arg = 0;
379 st_ref[6].repeat = 0;
380 /* st 7 layer 3 - non-ref */
381 st_ref[7].is_non_ref = 1;
382 st_ref[7].temporal_id = 3;
383 st_ref[7].ref_mode = REF_TO_PREV_REF_FRM;
384 st_ref[7].ref_arg = 0;
385 st_ref[7].repeat = 0;
386 /* st 8 layer 0 - ref */
387 st_ref[8].is_non_ref = 0;
388 st_ref[8].temporal_id = 0;
389 st_ref[8].ref_mode = REF_TO_PREV_REF_FRM;
390 st_ref[8].ref_arg = 0;
391 st_ref[8].repeat = 0;
392
393 st_cfg_cnt = 9;
394 tid0_loop = 8;
395 mlvec_dbg_flow("tsvc4\n");
396 } break;
397 default : {
398 mpp_err("invalid max temporal layer id %d\n", max_tid);
399 } break;
400 }
401
402 if (ltr_frames) {
403 RK_S32 i;
404
405 lt_cfg_cnt = ltr_frames;
406 mpp_assert(ltr_frames <= MPP_ENC_MAX_LT_REF_NUM);
407 for (i = 0; i < ltr_frames; i++) {
408 lt_ref[i].lt_idx = i;
409 lt_ref[i].temporal_id = 0;
410 lt_ref[i].ref_mode = REF_TO_PREV_LT_REF;
411 lt_ref[i].lt_gap = 0;
412 lt_ref[i].lt_delay = tid0_loop * i;
413 }
414 }
415
416 if (lt_cfg_cnt)
417 mpp_assert(st_cfg_cnt);
418
419 mlvec_dbg_flow("lt_cfg_cnt %d st_cfg_cnt %d\n", lt_cfg_cnt, st_cfg_cnt);
420 if (lt_cfg_cnt || st_cfg_cnt) {
421 MppEncRefCfg ref = NULL;
422
423 mpp_enc_ref_cfg_init(&ref);
424
425 ret = mpp_enc_ref_cfg_set_cfg_cnt(ref, lt_cfg_cnt, st_cfg_cnt);
426 ret = mpp_enc_ref_cfg_add_lt_cfg(ref, lt_cfg_cnt, lt_ref);
427 ret = mpp_enc_ref_cfg_add_st_cfg(ref, st_cfg_cnt, st_ref);
428 ret = mpp_enc_ref_cfg_set_keep_cpb(ref, 1);
429 ret = mpp_enc_ref_cfg_check(ref);
430
431 ret = mpi->control(mpp_ctx, MPP_ENC_SET_REF_CFG, ref);
432 if (ret)
433 mpp_err("mpi control enc set ref cfg failed ret %d\n", ret);
434
435 mpp_enc_ref_cfg_deinit(&ref);
436 } else {
437 ret = mpi->control(mpp_ctx, MPP_ENC_SET_REF_CFG, NULL);
438 if (ret)
439 mpp_err("mpi control enc set ref cfg failed ret %d\n", ret);
440 }
441
442 mlvec_dbg_func("leave ctx %p ret %d\n", ctx, ret);
443
444 return ret;
445 }
446