1 /*
2 * Copyright 2017 Rockchip Electronics Co. LTD
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define MODULE_TAG "hal_vp8e_base"
18
19 #include <string.h>
20
21 #include "mpp_mem.h"
22 #include "mpp_buffer.h"
23 #include "mpp_common.h"
24
25 #include "hal_vp8e_base.h"
26 #include "hal_vp8e_putbit.h"
27 #include "hal_vp8e_table.h"
28 #include "hal_vp8e_debug.h"
29
set_frame_params(void * hal)30 static MPP_RET set_frame_params(void *hal)
31 {
32
33 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
34 {
35 RK_S32 i;
36 Pps *pps = ctx->ppss.pps;
37 Vp8eSps *sps = &ctx->sps;
38
39 for (i = 0; i < 4; i++) {
40 pps->qp_sgm[i] = ctx->rc->qp_hdr;
41 pps->level_sgm[i] = sps->filter_level;
42 }
43 }
44
45 return MPP_OK;
46 }
47
/*
 * Derive loop-filter parameters (level, sharpness, ref/mode deltas)
 * for the current frame.
 *
 * Returns MPP_OK when the delta tables were rewritten; MPP_NOK on any
 * early exit (deltas disabled, filter level 0, or deltas inherited from
 * the previous frame). The return value is flow information, not an error.
 */
static MPP_RET set_filter(void *hal)
{
    HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
    Vp8eSps *sps = &ctx->sps;

    if (sps->auto_filter_level) {
        /* NOTE(review): the frame-level filter level is derived from a
         * fixed qp of 36 rather than ctx->rc->qp_hdr, while the
         * per-segment levels below use the real segment QPs — confirm
         * this asymmetry is intentional. */
        RK_U32 qp = 36;
        Pps *p_pps = ctx->ppss.pps;
        if (ctx->frame_type == VP8E_FRM_KEY) {
            /* Key frame: level = qp / 2 + 8, clipped to the 6-bit range */
            RK_S32 tmp = (qp * 64) / 128 + 8;
            sps->filter_level = MPP_CLIP3(0, 63, tmp);
            p_pps->level_sgm[0] = MPP_CLIP3(0, 63, (p_pps->qp_sgm[0] * 64) / 128 + 8);
            p_pps->level_sgm[1] = MPP_CLIP3(0, 63, (p_pps->qp_sgm[1] * 64) / 128 + 8);
            p_pps->level_sgm[2] = MPP_CLIP3(0, 63, (p_pps->qp_sgm[2] * 64) / 128 + 8);
            p_pps->level_sgm[3] = MPP_CLIP3(0, 63, (p_pps->qp_sgm[3] * 64) / 128 + 8);
        } else {
            /* Inter frame: level comes from the per-QP lookup table */
            sps->filter_level = inter_level_tbl[qp];
            p_pps->level_sgm[0] = inter_level_tbl[p_pps->qp_sgm[0]];
            p_pps->level_sgm[1] = inter_level_tbl[p_pps->qp_sgm[1]];
            p_pps->level_sgm[2] = inter_level_tbl[p_pps->qp_sgm[2]];
            p_pps->level_sgm[3] = inter_level_tbl[p_pps->qp_sgm[3]];
        }
    }

    if (sps->auto_filter_sharpness) {
        sps->filter_sharpness = 0;
    }

    /* Filter level deltas disabled: nothing more to set up */
    if (!sps->filter_delta_enable)
        return MPP_NOK;

    /* Value 2 means "send disable once": latch back to enabled state */
    if (sps->filter_delta_enable == 2) {
        sps->filter_delta_enable = 1;
        return MPP_NOK;
    }

    /* Filtering is off for this frame: zero all deltas */
    if (sps->filter_level == 0) {
        sps->ref_delta[0] = 0; /* Intra frame */
        sps->ref_delta[1] = 0; /* Last frame */
        sps->ref_delta[2] = 0; /* Golden frame */
        sps->ref_delta[3] = 0; /* Altref frame */
        sps->mode_delta[0] = 0; /* BPRED */
        sps->mode_delta[1] = 0; /* Zero */
        sps->mode_delta[2] = 0; /* New mv */
        sps->mode_delta[3] = 0; /* Split mv */
        return MPP_NOK;
    }

    /* Current picture refreshes no reference: keep previous deltas */
    if (!ctx->picbuf.cur_pic->ipf && !ctx->picbuf.cur_pic->grf &&
        !ctx->picbuf.cur_pic->arf) {
        memcpy(sps->ref_delta, sps->old_ref_delta, sizeof(sps->ref_delta));
        memcpy(sps->mode_delta, sps->old_mode_delta, sizeof(sps->mode_delta));
        return MPP_NOK;
    }

    /* Fixed default deltas per reference type ... */
    sps->ref_delta[0] = 2; /* Intra frame */
    sps->ref_delta[1] = 0; /* Last frame */
    sps->ref_delta[2] = -2; /* Golden frame */
    sps->ref_delta[3] = -2; /* Altref frame */

    /* ... and per prediction mode */
    sps->mode_delta[0] = 4; /* BPRED */
    sps->mode_delta[1] = -2; /* Zero */
    sps->mode_delta[2] = 2; /* New mv */
    sps->mode_delta[3] = 4; /* Split mv */

    /* Clamp deltas to the signed 6-bit range coded in the bitstream */
    {
        RK_U32 i = 0;
        for (i = 0; i < 4; i++) {
            sps->ref_delta[i] = MPP_CLIP3(-0x3f, 0x3f, sps->ref_delta[i]);
            sps->mode_delta[i] = MPP_CLIP3(-0x3f, 0x3f, sps->mode_delta[i]);
        }
    }
    return MPP_OK;
}
122
set_segmentation(void * hal)123 static MPP_RET set_segmentation(void *hal)
124 {
125 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
126
127 Vp8ePps *ppss = &ctx->ppss;
128 Pps *pps = ppss->pps;
129 Vp8eHwCfg *hw_cfg = &ctx->hw_cfg;
130 Vp8eVpuBuf *buffers = (Vp8eVpuBuf *)ctx->buffers;
131
132 {
133 RK_S32 qp = ctx->rc->qp_hdr;
134
135 if (hw_cfg->roi1_delta_qp)
136 pps->qp_sgm[1] = MPP_CLIP3(0, 127, qp - hw_cfg->roi1_delta_qp);
137
138 if (hw_cfg->roi2_delta_qp)
139 pps->qp_sgm[2] = MPP_CLIP3(0, 127, qp - hw_cfg->roi2_delta_qp);
140 }
141
142 {
143 RK_U32 x, y, mb, mask, id;
144 RK_U32 *map = mpp_buffer_get_ptr(buffers->hw_segment_map_buf);
145 RK_U32 *map_bck = map;
146 RK_U32 mapSize = (ctx->mb_per_frame + 15) / 16 * 8;
147
148 if (hw_cfg->roi1_delta_qp || hw_cfg->roi2_delta_qp) {
149 pps->segment_enabled = 1;
150
151 memset(pps->sgm.id_cnt, 0, sizeof(pps->sgm.id_cnt));
152
153 for (y = 0, mb = 0, mask = 0; y < ctx->mb_per_col; y++) {
154 for (x = 0; x < ctx->mb_per_row; x++) {
155 id = 0;
156 if ((x >= hw_cfg->roi1_left) && (x <= hw_cfg->roi1_right) &&
157 (y >= hw_cfg->roi1_top) && (y <= hw_cfg->roi1_bottom))
158 id = 1;
159 if ((x >= hw_cfg->roi1_left) && (x <= hw_cfg->roi2_right) &&
160 (y >= hw_cfg->roi2_top) && (y <= hw_cfg->roi2_bottom))
161 id = 2;
162
163 pps->sgm.id_cnt[id]++;
164
165 mask |= id << (28 - 4 * (mb % 8));
166 if ((mb % 8) == 7) {
167 *map++ = mask;
168 mask = 0;
169 }
170 mb++;
171 }
172 }
173 *map++ = mask;
174 vp8e_swap_endian(map_bck, mapSize);
175 } else if (pps->segment_enabled && pps->sgm.map_modified) {
176 memset(pps->sgm.id_cnt, 0, sizeof(pps->sgm.id_cnt));
177
178 for (mb = 0, mask = 0; mb < mapSize / 4; mb++) {
179 mask = map[mb];
180 for (x = 0; x < 8; x++) {
181 if (mb * 8 + x < ctx->mb_per_frame) {
182 id = (mask >> (28 - 4 * x)) & 0xF;
183 pps->sgm.id_cnt[id]++;
184 }
185 }
186 }
187 vp8e_swap_endian(map_bck, mapSize);
188 }
189 }
190 if (ctx->picbuf.cur_pic->i_frame || !pps->segment_enabled) {
191 memset(ppss->qp_sgm, 0xff, sizeof(ppss->qp_sgm));
192 memset(ppss->level_sgm, 0xff, sizeof(ppss->level_sgm));
193 ppss->prev_pps = NULL;
194 } else
195 ppss->prev_pps = ppss->pps;
196
197 return MPP_OK;
198 }
199
/*
 * Derive QP-dependent intra prediction mode penalties for the hardware:
 * 4x4 and 16x16 mode tree penalties scaled from the lookup tables, plus
 * the default intra-16x16 favor when it has not been preset.
 */
static void set_intra_pred_penalties(Vp8eHwCfg *hw_cfg, RK_U32 qp)
{
    RK_S32 idx;
    RK_S32 scale;

    /* Intra 4x4 modes: table entries scaled by (2*qp + 8) / 256 */
    scale = qp * 2 + 8;
    for (idx = 0; idx < 10; idx++) {
        hw_cfg->intra_b_mode_penalty[idx] =
            (intra4_mode_tree_penalty_tbl[idx] * scale) >> 8;
    }

    /* Intra 16x16 modes: table entries scaled by (2*qp + 64) / 256 */
    scale = qp * 2 + 64;
    for (idx = 0; idx < 4; idx++) {
        hw_cfg->intra_mode_penalty[idx] =
            (intra16_mode_tree_penalty_tbl[idx] * scale) >> 8;
    }

    /* If favor has not been set earlier (e.g. by testId) use default */
    if (hw_cfg->intra_16_favor == -1)
        hw_cfg->intra_16_favor = qp * 1024 / 128;
}
220
/*
 * Write the segmentation section of the VP8 frame header: map/data
 * update flags, per-segment absolute QP and filter-level values, and the
 * segment ID tree probabilities, then cache the tables for change
 * detection on the next frame.
 */
static void set_hdr_segmentation(Vp8ePutBitBuf *bitbuf, Vp8ePps *ppss,
                                 Vp8eHalEntropy *entropy)
{
    RK_S32 i, tmp;
    RK_U8 data_modified = 0;

    Pps *pps = ppss->pps;
    Sgm *sgm = &ppss->pps->sgm;

    /* Re-send segment data when either cached table differs */
    if (memcmp(ppss->qp_sgm, pps->qp_sgm, sizeof(ppss->qp_sgm)))
        data_modified = 1;

    if (memcmp(ppss->level_sgm, pps->level_sgm, sizeof(ppss->level_sgm)))
        data_modified = 1;

    /* No previous PPS (key frame / segmentation restart): map must go out */
    if (!ppss->prev_pps) {
        sgm->map_modified = 1;
    }

    vp8e_put_lit(bitbuf, sgm->map_modified, 1);
    vp8e_put_lit(bitbuf, data_modified, 1);

    if (data_modified) {
        /* Absolute (not delta) segment feature values */
        vp8e_put_lit(bitbuf, 1, 1);

        /* Per-segment quantizer: present flag + 7-bit magnitude + sign */
        for (i = 0; i < SGM_CNT; i++) {
            tmp = pps->qp_sgm[i];
            vp8e_put_lit(bitbuf, 1, 1);
            vp8e_put_lit(bitbuf, MPP_ABS(tmp), 7);
            vp8e_put_lit(bitbuf, tmp < 0, 1);
        }

        /* Per-segment filter level: present flag + 6-bit magnitude + sign */
        for (i = 0; i < SGM_CNT; i++) {
            tmp = pps->level_sgm[i];
            vp8e_put_lit(bitbuf, 1, 1);
            vp8e_put_lit(bitbuf, MPP_ABS(tmp), 6);
            vp8e_put_lit(bitbuf, tmp < 0, 1);
        }
    }

    if (sgm->map_modified) {
        /* Tree probabilities for the segment ID map, from the usage
         * counts collected in set_segmentation() */
        RK_S32 sum1 = sgm->id_cnt[0] + sgm->id_cnt[1];
        RK_S32 sum2 = sgm->id_cnt[2] + sgm->id_cnt[3];

        /* NOTE(review): assumes sum1 + sum2 > 0 (at least one MB was
         * counted) — division by zero otherwise; confirm callers
         * guarantee this when map_modified is set. */
        tmp = 255 * sum1 / (sum1 + sum2);
        entropy->segment_prob[0] = MPP_CLIP3(1, 255, tmp);

        tmp = sum1 ? 255 * sgm->id_cnt[0] / sum1 : 255;
        entropy->segment_prob[1] = MPP_CLIP3(1, 255, tmp);

        tmp = sum2 ? 255 * sgm->id_cnt[2] / sum2 : 255;
        entropy->segment_prob[2] = MPP_CLIP3(1, 255, tmp);

        /* Probabilities coded only for used branches */
        for (i = 0; i < 3; i++) {
            if (sgm->id_cnt[i] != 0) {
                vp8e_put_lit(bitbuf, 1, 1);
                vp8e_put_lit(bitbuf, entropy->segment_prob[i], 8);
            } else {
                vp8e_put_lit(bitbuf, 0, 1);
            }
        }
    }

    /* Cache the tables just written for next frame's change detection */
    memcpy(ppss->qp_sgm, pps->qp_sgm, sizeof(ppss->qp_sgm));
    memcpy(ppss->level_sgm, pps->level_sgm, sizeof(ppss->level_sgm));
}
287
/*
 * Write the loop-filter delta update section of the frame header: one
 * update flag, then per-reference and per-mode deltas as 6-bit magnitude
 * plus sign for each entry that changed. The written deltas are cached
 * in old_ref_delta / old_mode_delta for the next frame.
 */
static void set_filter_level_delta(Vp8ePutBitBuf *bitbuf, Vp8eSps *sps)
{
    RK_S32 idx;
    RK_S32 ref_changed[4];
    RK_S32 mode_changed[4];
    RK_U8 need_update = 0;

    for (idx = 0; idx < 4; idx++) {
        mode_changed[idx] = (sps->mode_delta[idx] != sps->old_mode_delta[idx]);
        ref_changed[idx] = (sps->ref_delta[idx] != sps->old_ref_delta[idx]);
        if (mode_changed[idx] || ref_changed[idx])
            need_update = 1;
    }

    /* Without entropy refresh the deltas must always be re-sent */
    if (!sps->refresh_entropy)
        need_update = 1;

    vp8e_put_lit(bitbuf, need_update, 1);
    if (!need_update)
        return;

    /* Reference-type deltas: changed flag, then magnitude + sign */
    for (idx = 0; idx < 4; idx++) {
        vp8e_put_lit(bitbuf, ref_changed[idx], 1);
        if (ref_changed[idx]) {
            RK_S32 delta = sps->ref_delta[idx];
            vp8e_put_lit(bitbuf, MPP_ABS(delta), 6);
            vp8e_put_lit(bitbuf, delta < 0, 1);
        }
    }

    /* Prediction-mode deltas: same layout */
    for (idx = 0; idx < 4; idx++) {
        vp8e_put_lit(bitbuf, mode_changed[idx], 1);
        if (mode_changed[idx]) {
            RK_S32 delta = sps->mode_delta[idx];
            vp8e_put_lit(bitbuf, MPP_ABS(delta), 6);
            vp8e_put_lit(bitbuf, delta < 0, 1);
        }
    }

    memcpy(sps->old_ref_delta, sps->ref_delta, sizeof(sps->ref_delta));
    memcpy(sps->old_mode_delta, sps->mode_delta, sizeof(sps->mode_delta));
}
330
/*
 * Write the VP8 frame header into bitbuf[1] (the bool-coded first
 * partition): segmentation, loop filter, quantizer, reference refresh
 * flags and entropy probability updates.
 *
 * Returns MPP_OK always (key frames return early after the common part).
 */
static MPP_RET set_frame_header(void *hal)
{
    HalVp8eCtx *ctx = (HalVp8eCtx *)hal;

    Vp8eSps *sps = &ctx->sps;
    Pps *pps = ctx->ppss.pps;

    Vp8ePutBitBuf *bitbuf = &ctx->bitbuf[1];
    HalVp8eRefPic *cur_pic = ctx->picbuf.cur_pic;
    Vp8eHalEntropy *entropy = &ctx->entropy;

    /* Key frames carry color space and pixel clamping type */
    if (cur_pic->i_frame) {
        vp8e_put_lit(bitbuf, sps->color_type, 1);

        vp8e_put_lit(bitbuf, sps->clamp_type, 1);
    }

    vp8e_put_lit(bitbuf, pps->segment_enabled, 1);
    if (pps->segment_enabled)
        set_hdr_segmentation(bitbuf, &ctx->ppss, entropy);

    /* Loop filter: type, 6-bit level, 3-bit sharpness */
    vp8e_put_lit(bitbuf, sps->filter_type, 1);

    vp8e_put_lit(bitbuf, sps->filter_level, 6);

    vp8e_put_lit(bitbuf, sps->filter_sharpness, 3);

    vp8e_put_lit(bitbuf, sps->filter_delta_enable, 1);
    if (sps->filter_delta_enable) {
        /* Filter level delta references reset by key frame */
        if (cur_pic->i_frame) {
            memset(sps->old_ref_delta, 0, sizeof(sps->ref_delta));
            memset(sps->old_mode_delta, 0, sizeof(sps->mode_delta));
        }
        set_filter_level_delta(bitbuf, sps);
    }

    vp8e_put_lit(bitbuf, sps->dct_partitions, 2);

    /* Base quantizer index, 7 bits */
    vp8e_put_lit(bitbuf, ctx->rc->qp_hdr, 7);

    /* Five quantizer delta flags (y1dc/y2dc/y2ac/uvdc/uvac), none used */
    vp8e_put_lit(bitbuf, 0, 1);
    vp8e_put_lit(bitbuf, 0, 1);
    vp8e_put_lit(bitbuf, 0, 1);
    vp8e_put_lit(bitbuf, 0, 1);
    vp8e_put_lit(bitbuf, 0, 1);

    if (!cur_pic->i_frame) {
        HalVp8eRefPic *ref_pic_list = ctx->picbuf.ref_pic_list;

        vp8e_put_lit(bitbuf, cur_pic->grf, 1); /* Grf refresh */
        vp8e_put_lit(bitbuf, cur_pic->arf, 1); /* Arf refresh */

        /* When golden is not refreshed, signal where it is copied from */
        if (!cur_pic->grf) {
            if (ref_pic_list[0].grf) {
                vp8e_put_lit(bitbuf, 1, 2); /* Ipf -> grf */
            } else if (ref_pic_list[2].grf) {
                vp8e_put_lit(bitbuf, 2, 2); /* Arf -> grf */
            } else {
                vp8e_put_lit(bitbuf, 0, 2); /* Not updated */
            }
        }

        /* Same for altref */
        if (!cur_pic->arf) {
            if (ref_pic_list[0].arf) {
                vp8e_put_lit(bitbuf, 1, 2); /* Ipf -> arf */
            } else if (ref_pic_list[1].arf) {
                vp8e_put_lit(bitbuf, 2, 2); /* Grf -> arf */
            } else {
                vp8e_put_lit(bitbuf, 0, 2); /* Not updated */
            }
        }

        /* Sign bias flags */
        vp8e_put_lit(bitbuf, sps->sing_bias[1], 1); /* Grf */
        vp8e_put_lit(bitbuf, sps->sing_bias[2], 1); /* Arf */
    }

    vp8e_put_lit(bitbuf, sps->refresh_entropy, 1);
    if (!cur_pic->i_frame) {
        vp8e_put_lit(bitbuf, cur_pic->ipf, 1); /* Last-frame refresh */
    }
    /* Coefficient probability updates against the cached old set */
    vp8e_calc_coeff_prob(bitbuf, &entropy->coeff_prob, &entropy->old_coeff_prob);
    /* mb_no_coeff_skip enabled, followed by its probability */
    vp8e_put_lit(bitbuf, 1, 1);
    vp8e_put_lit(bitbuf, entropy->skip_false_prob, 8);

    /* Key frame header ends here */
    if (cur_pic->i_frame)
        return MPP_OK;

    /* Inter-frame mode probabilities and MV probability updates */
    vp8e_put_lit(bitbuf, entropy->intra_prob, 8);
    vp8e_put_lit(bitbuf, entropy->last_prob, 8);
    vp8e_put_lit(bitbuf, entropy->gf_prob, 8);
    /* No explicit intra 16x16 / chroma mode probability updates */
    vp8e_put_lit(bitbuf, 0, 1);
    vp8e_put_lit(bitbuf, 0, 1);
    vp8e_calc_mv_prob(bitbuf, &entropy->mv_prob, &entropy->old_mv_prob);

    return MPP_OK;
}
428
/*
 * Fill the remaining hardware configuration for the frame: stream
 * hand-over state from the SW-written header, motion estimation modes
 * and penalty tables, per-segment quantizer registers and bool-encoder
 * state.
 *
 * Returns MPP_OK always.
 */
static MPP_RET set_new_frame(void *hal)
{
    RK_S32 i;
    RK_S32 qp;
    HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
    Vp8eSps *sps = &ctx->sps;
    Vp8eHwCfg *hw_cfg = &ctx->hw_cfg;
    Vp8eVpuBuf *buffers = (Vp8eVpuBuf *)ctx->buffers;

    /* Convert stream size to bytes, aligned down to 64-bit words */
    hw_cfg->output_strm_size /= 8;
    hw_cfg->output_strm_size &= (~0x07);

    {
        /* HW continues the stream right after the SW-written header */
        hw_cfg->output_strm_offset += ctx->bitbuf[1].byte_cnt;
        hw_cfg->first_free_bit = (hw_cfg->output_strm_offset & 0x07) * 8;
    }

    /* Hand the partial last 64-bit word of the stream over to the HW */
    if (hw_cfg->first_free_bit != 0) {
        RK_U32 val;
        RK_U8 *pTmp = (RK_U8 *)((size_t)(ctx->bitbuf[1].data) & (~0x07));

        /* Zero the not-yet-written bytes of that word */
        for (val = 6; val >= hw_cfg->first_free_bit / 8; val--) {
            pTmp[val] = 0;
        }

        /* Big-endian pack of bytes 0..3 */
        val = pTmp[0] << 24;
        val |= pTmp[1] << 16;
        val |= pTmp[2] << 8;
        val |= pTmp[3];

        hw_cfg->strm_start_msb = val;

        /* Bytes 4..6 only needed when more than 32 bits are used */
        if (hw_cfg->first_free_bit > 32) {
            val = pTmp[4] << 24;
            val |= pTmp[5] << 16;
            val |= pTmp[6] << 8;

            hw_cfg->strm_start_lsb = val;
        } else {
            hw_cfg->strm_start_lsb = 0;
        }
    } else {
        hw_cfg->strm_start_msb = hw_cfg->strm_start_lsb = 0;
    }

    /* Quarter-pixel MV: 0 = off, 1 = adaptive (off above 8160 MBs), else on */
    if (sps->quarter_pixel_mv == 0) {
        hw_cfg->disable_qp_mv = 1;
    } else if (sps->quarter_pixel_mv == 1) {
        if (ctx->mb_per_frame > 8160)
            hw_cfg->disable_qp_mv = 1;
        else
            hw_cfg->disable_qp_mv = 0;
    } else {
        hw_cfg->disable_qp_mv = 0;
    }

    hw_cfg->enable_cabac = 1;

    /* Split MV: 0 = off, 1 = adaptive (off above 1584 MBs), else on */
    if (sps->split_mv == 0)
        hw_cfg->split_mv_mode = 0;
    else if (sps->split_mv == 1) {
        if (ctx->mb_per_frame > 1584)
            hw_cfg->split_mv_mode = 0;
        else
            hw_cfg->split_mv_mode = 1;
    } else
        hw_cfg->split_mv_mode = 1;

    /* Default mode-decision penalties; -1 (or 0 for split) = not preset */
    qp = ctx->rc->qp_hdr;
    if (hw_cfg->inter_favor == -1) {
        RK_S32 tmp = 128 - ctx->entropy.intra_prob;

        if (tmp < 0) {
            hw_cfg->inter_favor = tmp & 0xFFFF;
        } else {
            tmp = qp * 2 - 40;
            hw_cfg->inter_favor = MPP_MAX(0, tmp);
        }
    }

    if (hw_cfg->diff_mv_penalty[0] == -1)
        hw_cfg->diff_mv_penalty[0] = 64 / 2;
    if (hw_cfg->diff_mv_penalty[1] == -1)
        hw_cfg->diff_mv_penalty[1] = 60 / 2 * 32;
    if (hw_cfg->diff_mv_penalty[2] == -1)
        hw_cfg->diff_mv_penalty[2] = 8;
    if (hw_cfg->skip_penalty == -1)
        hw_cfg->skip_penalty = (qp >= 100) ? (3 * qp / 4) : 0; /* Zero/nearest/near */
    if (hw_cfg->golden_penalty == -1)
        hw_cfg->golden_penalty = MPP_MAX(0, 5 * qp / 4 - 10);
    if (hw_cfg->split_penalty[0] == 0)
        hw_cfg->split_penalty[0] = MPP_MIN(1023, vp8_split_penalty_tbl[qp] / 2);
    if (hw_cfg->split_penalty[1] == 0)
        hw_cfg->split_penalty[1] = MPP_MIN(1023, (2 * vp8_split_penalty_tbl[qp] + 40) / 4);
    if (hw_cfg->split_penalty[3] == 0)
        hw_cfg->split_penalty[3] = MPP_MIN(511, (8 * vp8_split_penalty_tbl[qp] + 500) / 16);

    /* MV cost tables: linear penalty plus entropy-based quarter-pel cost */
    for (i = 0; i < 128; i++) {
        RK_S32 y, x;

        hw_cfg->dmv_penalty[i] = i * 2;
        y = vp8e_calc_cost_mv(i * 2, ctx->entropy.mv_prob[0]); /* mv y */
        x = vp8e_calc_cost_mv(i * 2, ctx->entropy.mv_prob[1]); /* mv x */
        hw_cfg->dmv_qpel_penalty[i] = MPP_MIN(255, (y + x + 1) / 2 * weight_tbl[qp] >> 8);
    }

    /* Per-segment quantizer / zero-bin / rounding / dequant registers */
    for (i = 0; i < 4; i++) {
        qp = ctx->ppss.pps->qp_sgm[i];
        hw_cfg->y1_quant_dc[i] = ctx->qp_y1[qp].quant[0];
        hw_cfg->y1_quant_ac[i] = ctx->qp_y1[qp].quant[1];
        hw_cfg->y2_quant_dc[i] = ctx->qp_y2[qp].quant[0];
        hw_cfg->y2_quant_ac[i] = ctx->qp_y2[qp].quant[1];
        hw_cfg->ch_quant_dc[i] = ctx->qp_ch[qp].quant[0];
        hw_cfg->ch_quant_ac[i] = ctx->qp_ch[qp].quant[1];
        hw_cfg->y1_zbin_dc[i] = ctx->qp_y1[qp].zbin[0];
        hw_cfg->y1_zbin_ac[i] = ctx->qp_y1[qp].zbin[1];
        hw_cfg->y2_zbin_dc[i] = ctx->qp_y2[qp].zbin[0];
        hw_cfg->y2_zbin_ac[i] = ctx->qp_y2[qp].zbin[1];
        hw_cfg->ch_zbin_dc[i] = ctx->qp_ch[qp].zbin[0];
        hw_cfg->ch_zbin_ac[i] = ctx->qp_ch[qp].zbin[1];
        hw_cfg->y1_round_dc[i] = ctx->qp_y1[qp].round[0];
        hw_cfg->y1_round_ac[i] = ctx->qp_y1[qp].round[1];
        hw_cfg->y2_round_dc[i] = ctx->qp_y2[qp].round[0];
        hw_cfg->y2_round_ac[i] = ctx->qp_y2[qp].round[1];
        hw_cfg->ch_round_dc[i] = ctx->qp_ch[qp].round[0];
        hw_cfg->ch_round_ac[i] = ctx->qp_ch[qp].round[1];
        hw_cfg->y1_dequant_dc[i] = ctx->qp_y1[qp].dequant[0];
        hw_cfg->y1_dequant_ac[i] = ctx->qp_y1[qp].dequant[1];
        hw_cfg->y2_dequant_dc[i] = ctx->qp_y2[qp].dequant[0];
        hw_cfg->y2_dequant_ac[i] = ctx->qp_y2[qp].dequant[1];
        hw_cfg->ch_dequant_dc[i] = ctx->qp_ch[qp].dequant[0];
        hw_cfg->ch_dequant_ac[i] = ctx->qp_ch[qp].dequant[1];

        hw_cfg->filter_level[i] = ctx->ppss.pps->level_sgm[i];
    }

    /* Bool encoder state handover to the HW */
    hw_cfg->bool_enc_value = ctx->bitbuf[1].bottom;
    hw_cfg->bool_enc_value_bits = 24 - ctx->bitbuf[1].bits_left;
    hw_cfg->bool_enc_range = ctx->bitbuf[1].range;

    /* 1 = intra coded frame, 0 = inter */
    if (ctx->picbuf.cur_pic->i_frame)
        hw_cfg->frame_coding_type = 1;
    else
        hw_cfg->frame_coding_type = 0;

    hw_cfg->size_tbl_base = mpp_buffer_get_fd(buffers->hw_size_table_buf);

    hw_cfg->dct_partitions = sps->dct_partitions;
    hw_cfg->filter_disable = sps->filter_type;
    hw_cfg->filter_sharpness = sps->filter_sharpness;
    hw_cfg->segment_enable = ctx->ppss.pps->segment_enabled;
    hw_cfg->segment_map_update = ctx->ppss.pps->sgm.map_modified;

    /* Map has been consumed; clear the dirty flag for the next frame */
    ctx->ppss.pps->sgm.map_modified = 0;

    for (i = 0; i < 4; i++) {
        hw_cfg->lf_ref_delta[i] = sps->ref_delta[i];
        hw_cfg->lf_mode_delta[i] = sps->mode_delta[i];
    }

    /* NOTE(review): at this point qp holds the last segment's QP
     * (qp_sgm[3]) from the loop above, not the frame QP — confirm
     * the intra penalties are meant to use it. */
    set_intra_pred_penalties(hw_cfg, qp);

    memset(mpp_buffer_get_ptr(buffers->hw_prob_count_buf),
           0, VP8_PROB_COUNT_BUF_SIZE);

    return MPP_OK;
}
596
set_code_frame(void * hal)597 static MPP_RET set_code_frame(void *hal)
598 {
599 HalVp8eCtx *ctx = (HalVp8eCtx *) hal;
600
601 vp8e_init_entropy(ctx);
602 set_segmentation(ctx);
603 set_filter(ctx);
604 set_frame_header(ctx);
605 set_new_frame(ctx);
606 vp8e_write_entropy_tables(ctx);
607
608 return MPP_OK;
609 }
610
/* Return a reference-picture slot to its idle state: all reference
 * roles cleared and poc invalidated. */
static void reset_refpic(HalVp8eRefPic *refPic)
{
    refPic->search = 0;
    refPic->grf = 0;
    refPic->arf = 0;
    refPic->ipf = 0;
    refPic->show = 0;
    refPic->p_frame = 0;
    refPic->i_frame = 0;
    refPic->poc = -1;
}
622
/*
 * Rebuild the ordered reference list (last/ipf, golden/grf, altref/arf)
 * by copying the first matching picture of each role from the pool,
 * skipping the current picture, then clearing the role flags on the
 * list copies.
 */
static void init_ref_pic_list(HalVp8ePicBuf *picbuf)
{
    HalVp8eRefPic *pool = picbuf->ref_pic;
    HalVp8eRefPic *cur = picbuf->cur_pic;
    HalVp8eRefPic *list = picbuf->ref_pic_list;
    RK_S32 idx;
    RK_S32 cnt = 0;

    /* Last-frame (ipf) reference */
    for (idx = 0; idx < picbuf->size + 1; idx++) {
        if (pool[idx].ipf && (&pool[idx] != cur)) {
            list[cnt++] = pool[idx];
            break;
        }
    }

    /* Golden-frame reference */
    for (idx = 0; idx < picbuf->size + 1; idx++) {
        if (pool[idx].grf && (&pool[idx] != cur)) {
            list[cnt++] = pool[idx];
            break;
        }
    }

    /* Altref reference (last slot filled; index not advanced afterwards) */
    for (idx = 0; idx < picbuf->size + 1; idx++) {
        if (pool[idx].arf && (&pool[idx] != cur)) {
            list[cnt] = pool[idx];
            break;
        }
    }

    /* Role flags live on the pool entries, not the list copies */
    for (idx = 0; idx < picbuf->size; idx++) {
        list[idx].ipf = 0;
        list[idx].grf = 0;
        list[idx].arf = 0;
    }
}
659
init_picbuf(void * hal)660 static MPP_RET init_picbuf(void *hal)
661 {
662 RK_S32 i = 0;
663
664 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
665 HalVp8ePicBuf *buf = &ctx->picbuf;
666
667 if (buf->cur_pic->i_frame) {
668 buf->cur_pic->p_frame = 0;
669 buf->cur_pic->ipf = 1;
670 buf->cur_pic->grf = 1;
671 buf->cur_pic->arf = 1;
672
673 for (i = 0; i < 4; i++) {
674 if (&buf->ref_pic[i] != buf->cur_pic) {
675 reset_refpic(&buf->ref_pic[i]);
676 }
677 }
678 }
679
680 for (i = 0; i < 3; i++) {
681 reset_refpic(&buf->ref_pic_list[i]);
682 }
683
684 init_ref_pic_list(buf);
685
686 return MPP_OK;
687 }
688
/*
 * Select the reference pictures and reconstruction buffer for the
 * current frame and program the corresponding HW base addresses.
 *
 * Returns MPP_OK always.
 */
static MPP_RET set_picbuf_ref(void *hal)
{
    RK_S32 i = 0;

    HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
    HalVp8ePicBuf *pic_buf = &ctx->picbuf;
    Vp8eHwCfg *hw_cfg = &ctx->hw_cfg;
    HalVp8eRefPic *ref_pic_list = pic_buf->ref_pic_list;

    /* Drop golden/altref roles that the buffer pool cannot hold */
    {
        RK_S32 no_grf = 0;
        RK_S32 no_arf = 0;
        if (pic_buf->size < 2) {
            no_grf = 1;
            pic_buf->cur_pic->grf = 0;
        }
        if (pic_buf->size < 3) {
            no_arf = 1;
            pic_buf->cur_pic->arf = 0;
        }

        for (i = 0; i < pic_buf->size; i++) {
            if (pic_buf->cur_pic->grf || no_grf)
                pic_buf->ref_pic_list[i].grf = 0;
            if (pic_buf->cur_pic->arf || no_arf)
                pic_buf->ref_pic_list[i].arf = 0;
        }
    }
    /* Pick up to two references flagged for motion search */
    {
        RK_S32 ref_idx = -1;
        RK_S32 ref_idx2 = -1;

        for (i = 0; i < 3; i++) {
            if ((i < pic_buf->size) && ref_pic_list[i].search) {
                if (ref_idx == -1)
                    ref_idx = i;
                else if (ref_idx2 == -1)
                    ref_idx2 = i;
                else
                    ref_pic_list[i].search = 0; /* only two supported */
            } else {
                ref_pic_list[i].search = 0;
            }
        }

        /* Fall back to the first list entry when nothing was flagged */
        if (ref_idx == -1)
            ref_idx = 0;

        hw_cfg->mv_ref_idx[0] = hw_cfg->mv_ref_idx[1] = ref_idx;

        if (pic_buf->cur_pic->p_frame) {
            pic_buf->ref_pic_list[ref_idx].search = 1;

            /* Both search slots default to the primary reference */
            hw_cfg->internal_img_lum_base_r[0] = ref_pic_list[ref_idx].picture.lum;
            hw_cfg->internal_img_chr_base_r[0] = ref_pic_list[ref_idx].picture.cb;
            hw_cfg->internal_img_lum_base_r[1] = ref_pic_list[ref_idx].picture.lum;
            hw_cfg->internal_img_chr_base_r[1] = ref_pic_list[ref_idx].picture.cb;
            hw_cfg->mv_ref_idx[0] = hw_cfg->mv_ref_idx[1] = ref_idx;
            hw_cfg->ref2_enable = 0;

            /* Second search reference, when available */
            if (ref_idx2 != -1) {
                hw_cfg->internal_img_lum_base_r[1] = ref_pic_list[ref_idx2].picture.lum;
                hw_cfg->internal_img_chr_base_r[1] = ref_pic_list[ref_idx2].picture.cb;
                hw_cfg->mv_ref_idx[1] = ref_idx2;
                hw_cfg->ref2_enable = 1;
            }
        }
    }
    hw_cfg->rec_write_disable = 0;

    /* No reconstruction buffer yet: reuse a pool slot whose reference
     * roles are a subset of the current picture's (it will be
     * overwritten anyway); otherwise disable reconstruction write */
    if (!pic_buf->cur_pic->picture.lum) {
        HalVp8eRefPic *cur_pic = pic_buf->cur_pic;
        HalVp8eRefPic *cand;
        RK_S32 recIdx = -1;

        for (i = 0; i < pic_buf->size + 1; i++) {
            cand = &pic_buf->ref_pic[i];
            if (cand == cur_pic)
                continue;
            if (((cur_pic->ipf | cand->ipf) == cur_pic->ipf) &&
                ((cur_pic->grf | cand->grf) == cur_pic->grf) &&
                ((cur_pic->arf | cand->arf) == cur_pic->arf))
                recIdx = i;
        }

        if (recIdx >= 0) {
            cur_pic->picture.lum = pic_buf->ref_pic[recIdx].picture.lum;
            pic_buf->ref_pic[recIdx].picture.lum = 0;
        } else {
            hw_cfg->rec_write_disable = 1;
        }
    }

    hw_cfg->internal_img_lum_base_w = pic_buf->cur_pic->picture.lum;
    hw_cfg->internal_img_chr_base_w = pic_buf->cur_pic->picture.cb;

    return MPP_OK;
}
787
/*
 * Write the 32-byte IVF file header ("DKIF") for the encoded stream:
 * signature, header size, "VP80" FourCC, picture dimensions, frame rate
 * numerator/denominator and total frame count — all little-endian.
 * Bytes not written explicitly stay zero.
 */
void write_ivf_header(void *hal, RK_U8 *dst)
{
    HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
    MppEncPrepCfg *prep = &ctx->cfg->prep;
    MppEncRcCfg *rc = &ctx->cfg->rc;
    RK_U8 hdr[IVF_HDR_BYTES] = {0};
    RK_U8 *p = hdr;

    /* Signature "DKIF" */
    *p++ = 'D';
    *p++ = 'K';
    *p++ = 'I';
    *p++ = 'F';

    /* Version (bytes 4..5) stays zero; header length = 32, LE 16-bit */
    p += 2;
    *p++ = 32;
    p++;

    /* FourCC "VP80" */
    *p++ = 'V';
    *p++ = 'P';
    *p++ = '8';
    *p++ = '0';

    /* Width and height, LE 16-bit each */
    *p++ = prep->width & 0xff;
    *p++ = (prep->width >> 8) & 0xff;
    *p++ = prep->height & 0xff;
    *p++ = (prep->height >> 8) & 0xff;

    /* Frame-rate numerator, denominator and frame count, LE 32-bit each */
    {
        RK_U32 vals[3];
        RK_S32 i, b;

        vals[0] = rc->fps_out_num;
        vals[1] = rc->fps_out_denorm;
        vals[2] = ctx->frame_cnt;

        for (i = 0; i < 3; i++) {
            for (b = 0; b < 4; b++)
                *p++ = (vals[i] >> (8 * b)) & 0xff;
        }
    }

    memcpy(dst, hdr, IVF_HDR_BYTES);
}
831
/*
 * Write the 12-byte IVF frame header: 32-bit LE frame size followed by
 * a 64-bit LE timestamp (the frame counter).
 *
 * Fix: the shifts >> 32 .. >> 56 were applied directly to
 * ctx->frame_cnt; if that counter is a 32-bit type, shifting by the
 * type width or more is undefined behavior in C. Widen to 64 bits
 * first so the shifts are always well defined.
 */
static void write_ivf_frame(void *hal, RK_U8 *out)
{
    RK_U8 data[IVF_FRM_BYTES];

    HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
    RK_S32 byte_cnt = ctx->frame_size;
    RK_U64 frame_cnt = ctx->frame_cnt;
    RK_S32 i;

    /* Frame payload size, LE 32-bit */
    data[0] = byte_cnt & 0xff;
    data[1] = (byte_cnt >> 8) & 0xff;
    data[2] = (byte_cnt >> 16) & 0xff;
    data[3] = (byte_cnt >> 24) & 0xff;

    /* Timestamp (frame counter), LE 64-bit */
    for (i = 0; i < 8; i++)
        data[4 + i] = (frame_cnt >> (8 * i)) & 0xff;

    memcpy(out, data, IVF_FRM_BYTES);
}
855
/*
 * Write the 3-byte VP8 frame tag and, for key frames, the start code
 * (0x9d 0x01 0x2a) and dimension/scaling fields into partition 0.
 *
 * Returns MPP_OK for key frames; MPP_NOK for inter frames, which end
 * after the tag — the return value is flow information, not an error.
 */
static MPP_RET set_frame_tag(void *hal)
{
    RK_S32 tmp = 0;
    HalVp8eCtx *ctx = (HalVp8eCtx *)hal;

    HalVp8ePicBuf *pic_buf = &ctx->picbuf;
    HalVp8eRefPic *cur_pic = pic_buf->cur_pic;
    Vp8ePutBitBuf *bitbuf = &ctx->bitbuf[0];
    RK_S32 pic_height_in_pixel;
    RK_S32 pic_width_in_pixel;
    RK_S32 h_scaling;
    RK_S32 v_scaling;

    /* Tag: first-partition size | show flag | profile | inter flag */
    tmp = ((ctx->bitbuf[1].byte_cnt) << 5) |
          ((cur_pic->show ? 1 : 0) << 4) |
          (ctx->sps.profile << 1) |
          (cur_pic->i_frame ? 0 : 1);

    /* Tag is written LSB first */
    vp8e_put_byte(bitbuf, tmp & 0xff);

    vp8e_put_byte(bitbuf, (tmp >> 8) & 0xff);

    vp8e_put_byte(bitbuf, (tmp >> 16) & 0xff);

    /* Inter frames carry only the 3-byte tag */
    if (!cur_pic->i_frame)
        return MPP_NOK;

    /* Key frame start code */
    vp8e_put_byte(bitbuf, 0x9d);
    vp8e_put_byte(bitbuf, 0x01);
    vp8e_put_byte(bitbuf, 0x2a);

    /* Rotation swaps the coded width/height and the scaling fields */
    if (ctx->hw_cfg.input_rotation) {
        pic_height_in_pixel = ctx->sps.pic_width_in_pixel;
        pic_width_in_pixel = ctx->sps.pic_height_in_pixel;
        h_scaling = ctx->sps.vertical_scaling;
        v_scaling = ctx->sps.horizontal_scaling;
    } else {
        pic_height_in_pixel = ctx->sps.pic_height_in_pixel;
        pic_width_in_pixel = ctx->sps.pic_width_in_pixel;
        h_scaling = ctx->sps.horizontal_scaling;
        v_scaling = ctx->sps.vertical_scaling;
    }

    /* 14-bit dimension + 2-bit scaling, little-endian 16-bit each */
    tmp = pic_width_in_pixel | (h_scaling << 14);
    vp8e_put_byte(bitbuf, tmp & 0xff);
    vp8e_put_byte(bitbuf, tmp >> 8);

    tmp = pic_height_in_pixel | (v_scaling << 14);
    vp8e_put_byte(bitbuf, tmp & 0xff);
    vp8e_put_byte(bitbuf, tmp >> 8);

    return MPP_OK;
}
909
set_data_part_size(void * hal)910 static MPP_RET set_data_part_size(void *hal)
911 {
912 RK_S32 i = 0;
913 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
914
915 if (!ctx->sps.dct_partitions)
916 return MPP_NOK;
917
918 for (i = 2; i < ctx->sps.partition_cnt - 1; i++) {
919 Vp8ePutBitBuf *bitbuf = ctx->bitbuf;
920 RK_S32 tmp = bitbuf[i].data - bitbuf[i].p_data;
921 vp8e_put_byte(&bitbuf[1], tmp & 0xff);
922 vp8e_put_byte(&bitbuf[1], (tmp >> 8) & 0xff);
923 vp8e_put_byte(&bitbuf[1], (tmp >> 16) & 0xff);
924 }
925
926 return MPP_OK;
927 }
928
update_picbuf(HalVp8ePicBuf * picbuf)929 static MPP_RET update_picbuf(HalVp8ePicBuf *picbuf)
930 {
931 RK_S32 i , j;
932
933 HalVp8eRefPic *ref_pic_list = picbuf->ref_pic_list;
934 HalVp8eRefPic *ref_pic = picbuf->ref_pic;
935 HalVp8eRefPic *cur_pic = picbuf->cur_pic;
936
937 picbuf->last_pic = picbuf->cur_pic;
938
939 for (i = 0; i < picbuf->size + 1; i++) {
940 if (&ref_pic[i] == cur_pic)
941 continue;
942 if (cur_pic->ipf)
943 ref_pic[i].ipf = 0;
944 if (cur_pic->grf)
945 ref_pic[i].grf = 0;
946 if (cur_pic->arf)
947 ref_pic[i].arf = 0;
948 }
949
950 for (i = 0; i < picbuf->size; i++) {
951 for (j = 0; j < picbuf->size + 1; j++) {
952 if (ref_pic_list[i].grf)
953 ref_pic[j].grf = 0;
954 if (ref_pic_list[i].arf)
955 ref_pic[j].arf = 0;
956 }
957 }
958
959 for (i = 0; i < picbuf->size; i++) {
960 if (ref_pic_list[i].grf)
961 ref_pic_list[i].refPic->grf = 1;
962 if (ref_pic_list[i].arf)
963 ref_pic_list[i].refPic->arf = 1;
964 }
965
966 for (i = 0; i < picbuf->size + 1; i++) {
967 HalVp8eRefPic *tmp = &ref_pic[i];
968 if (!tmp->ipf && !tmp->arf && !tmp->grf) {
969 picbuf->cur_pic = &ref_pic[i];
970 break;
971 }
972 }
973
974 return MPP_OK;
975 }
976
set_parameter(void * hal)977 static MPP_RET set_parameter(void *hal)
978 {
979 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
980
981 Vp8eSps *sps = &ctx->sps;
982 Vp8eHwCfg *hw_cfg = &ctx->hw_cfg;
983 MppEncPrepCfg *set = &ctx->cfg->prep;
984
985 RK_S32 width = set->width;
986 RK_S32 height = set->height;
987 RK_S32 width_align = MPP_ALIGN(set->width, 16);
988 RK_S32 height_align = MPP_ALIGN(set->height, 16);
989 RK_U32 stride;
990 RK_U32 rotation = 0;
991
992 // do not support mirroring
993 if (set->mirroring)
994 mpp_err_f("Warning: do not support mirroring\n");
995
996 if (set->rotation == MPP_ENC_ROT_90)
997 rotation = 1;
998 else if (set->rotation == MPP_ENC_ROT_270)
999 rotation = 2;
1000 else if (set->rotation != MPP_ENC_ROT_0)
1001 mpp_err_f("Warning: only support 90 or 270 degree rotate, request rotate %d", rotation);
1002
1003 if (rotation) {
1004 MPP_SWAP(RK_S32, width, height);
1005 MPP_SWAP(RK_S32, width_align, height_align);
1006 }
1007
1008 stride = get_vepu_pixel_stride(&ctx->stride_cfg, width,
1009 set->hor_stride, set->format);
1010
1011 ctx->mb_per_frame = width_align / 16 * height_align / 16;
1012 ctx->mb_per_row = width_align / 16;
1013 ctx->mb_per_col = height_align / 16;
1014
1015 sps->pic_width_in_pixel = width_align;
1016 sps->pic_height_in_pixel = height_align;
1017 sps->pic_width_in_mbs = width_align / 16;
1018 sps->pic_height_in_mbs = height_align / 16;
1019 sps->horizontal_scaling = 0;
1020 sps->vertical_scaling = 0;
1021 sps->color_type = 0;
1022 sps->clamp_type = 0;
1023 sps->dct_partitions = 0;
1024 sps->partition_cnt = 2 + (1 << sps->dct_partitions);
1025 sps->profile = 1;
1026 sps->filter_type = 0;
1027 sps->filter_level = 0;
1028 sps->filter_sharpness = 0;
1029 sps->auto_filter_level = 1;
1030 sps->auto_filter_sharpness = 1;
1031 sps->quarter_pixel_mv = 1;
1032 sps->split_mv = 1;
1033 sps->refresh_entropy = 1;
1034 memset(sps->sing_bias, 0, sizeof(sps->sing_bias));
1035
1036 sps->filter_delta_enable = 1;
1037 memset(sps->ref_delta, 0, sizeof(sps->ref_delta));
1038 memset(sps->mode_delta, 0, sizeof(sps->mode_delta));
1039
1040 hw_cfg->input_rotation = rotation;
1041
1042 {
1043 RK_U32 tmp = 0;
1044 RK_U32 hor_offset_src = 0;
1045 RK_U32 ver_offset_src = 0;
1046 RK_U8 video_stab = 0;
1047
1048 if (set->format == MPP_FMT_YUV420SP || set->format == MPP_FMT_YUV420P) {
1049 tmp = ver_offset_src;
1050 tmp *= stride;
1051 tmp += hor_offset_src;
1052 hw_cfg->input_lum_base += (tmp & (~7));
1053 hw_cfg->input_luma_base_offset = tmp & 7;
1054
1055 if (video_stab)
1056 hw_cfg->vs_next_luma_base += (tmp & (~7));
1057
1058 if (set->format == MPP_FMT_YUV420P) {
1059 tmp = ver_offset_src / 2;
1060 tmp *= stride / 2;
1061 tmp += hor_offset_src / 2;
1062
1063 hw_cfg->input_cb_base += (tmp & (~7));
1064 hw_cfg->input_cr_base += (tmp & (~7));
1065 hw_cfg->input_chroma_base_offset = tmp & 7;
1066 } else {
1067 tmp = ver_offset_src / 2;
1068 tmp *= stride / 2;
1069 tmp += hor_offset_src / 2;
1070 tmp *= 2;
1071
1072 hw_cfg->input_cb_base += (tmp & (~7));
1073 hw_cfg->input_chroma_base_offset = tmp & 7;
1074 }
1075 } else if (set->format <= MPP_FMT_BGR444 && set->format >= MPP_FMT_RGB565) {
1076 tmp = ver_offset_src;
1077 tmp *= stride;
1078 tmp += hor_offset_src;
1079 tmp *= 2;
1080
1081 hw_cfg->input_lum_base += (tmp & (~7));
1082 hw_cfg->input_luma_base_offset = tmp & 7;
1083 hw_cfg->input_chroma_base_offset = (hw_cfg->input_luma_base_offset / 4) * 4;
1084
1085 if (video_stab)
1086 hw_cfg->vs_next_luma_base += (tmp & (~7));
1087 } else {
1088 tmp = ver_offset_src;
1089 tmp *= stride;
1090 tmp += hor_offset_src;
1091 tmp *= 4;
1092
1093 hw_cfg->input_lum_base += (tmp & (~7));
1094 hw_cfg->input_luma_base_offset = (tmp & 7) / 2;
1095
1096 if (video_stab)
1097 hw_cfg->vs_next_luma_base += (tmp & (~7));
1098 }
1099
1100 hw_cfg->mbs_in_row = width_align / 16;
1101 hw_cfg->mbs_in_col = height_align / 16;
1102 hw_cfg->pixels_on_row = stride;
1103 }
1104 if (width & 0x0F)
1105 hw_cfg->x_fill = (16 - (width & 0x0F)) / 4;
1106 else
1107 hw_cfg->x_fill = 0;
1108
1109 if (height & 0x0F)
1110 hw_cfg->y_fill = 16 - (height & 0x0F);
1111 else
1112 hw_cfg->y_fill = 0;
1113
1114 hw_cfg->vs_mode = 0;
1115
1116 switch (set->color) {
1117 case MPP_FRAME_SPC_RGB: /* BT.601 */
1118 default:
1119 /* Y = 0.2989 R + 0.5866 G + 0.1145 B
1120 * Cb = 0.5647 (B - Y) + 128
1121 * Cr = 0.7132 (R - Y) + 128
1122 */
1123 hw_cfg->rgb_coeff_a = 19589;
1124 hw_cfg->rgb_coeff_b = 38443;
1125 hw_cfg->rgb_coeff_c = 7504;
1126 hw_cfg->rgb_coeff_e = 37008;
1127 hw_cfg->rgb_coeff_f = 46740;
1128 break;
1129
1130 case MPP_FRAME_SPC_BT709: /* BT.709 */
1131 /* Y = 0.2126 R + 0.7152 G + 0.0722 B
1132 * Cb = 0.5389 (B - Y) + 128
1133 * Cr = 0.6350 (R - Y) + 128
1134 */
1135 hw_cfg->rgb_coeff_a = 13933;
1136 hw_cfg->rgb_coeff_b = 46871;
1137 hw_cfg->rgb_coeff_c = 732;
1138 hw_cfg->rgb_coeff_e = 35317;
1139 hw_cfg->rgb_coeff_f = 41615;
1140 break;
1141 }
1142
1143 hw_cfg->r_mask_msb = hw_cfg->g_mask_msb = hw_cfg->b_mask_msb = 0;
1144 VepuFormatCfg fmt_cfg;
1145 if (!get_vepu_fmt(&fmt_cfg, set->format)) {
1146 hw_cfg->input_format = fmt_cfg.format;
1147 hw_cfg->r_mask_msb = fmt_cfg.r_mask;
1148 hw_cfg->g_mask_msb = fmt_cfg.g_mask;
1149 hw_cfg->b_mask_msb = fmt_cfg.b_mask;
1150 } else
1151 return MPP_NOK;
1152
1153 return MPP_OK;
1154 }
1155
set_picbuf(void * hal)1156 static MPP_RET set_picbuf(void *hal)
1157 {
1158 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
1159
1160 // find one dpb for current picture
1161 {
1162 RK_U32 i = 0;
1163 RK_S32 width = ctx->sps.pic_width_in_mbs * 16;
1164 RK_S32 height = ctx->sps.pic_height_in_mbs * 16;
1165 HalVp8ePicBuf *picbuf = &ctx->picbuf;
1166
1167 memset(picbuf->ref_pic, 0, sizeof(picbuf->ref_pic));
1168 memset(picbuf->ref_pic_list, 0, sizeof(picbuf->ref_pic_list));
1169
1170 for (i = 0; i < REF_FRAME_COUNT + 1; i++) {
1171 picbuf->ref_pic[i].picture.lum_width = width;
1172 picbuf->ref_pic[i].picture.lum_height = height;
1173 picbuf->ref_pic[i].picture.ch_width = width / 2;
1174 picbuf->ref_pic[i].picture.ch_height = height / 2;
1175 picbuf->ref_pic[i].picture.lum = 0;
1176 picbuf->ref_pic[i].picture.cb = 0;
1177 }
1178
1179 picbuf->cur_pic = &picbuf->ref_pic[0];
1180 }
1181
1182 ctx->ppss.size = 1;
1183 ctx->ppss.store = (Pps *)mpp_calloc(Pps, 1);
1184 if (ctx->ppss.store == NULL) {
1185 mpp_err("failed to malloc ppss store.\n");
1186 goto __ERR_RET;
1187 }
1188
1189 ctx->ppss.pps = ctx->ppss.store;
1190 ctx->ppss.pps->segment_enabled = 0;
1191 ctx->ppss.pps->sgm.map_modified = 0;
1192
1193 return MPP_OK;
1194
1195 __ERR_RET:
1196 return MPP_NOK;
1197 }
1198
/*
 * Allocate every hardware buffer the VEPU needs for VP8 encoding and
 * program the buffer fds plus default penalty/ROI values into hw_cfg.
 *
 * On any failure all buffers acquired so far are released via
 * hal_vp8e_buf_free() and the failing mpp_buffer_get() code is returned.
 */
static MPP_RET alloc_buffer(void *hal)
{
    MPP_RET ret = MPP_OK;

    HalVp8eCtx *ctx = (HalVp8eCtx *)hal;

    Vp8eHwCfg *hw_cfg = &ctx->hw_cfg;
    MppEncPrepCfg *pre = &ctx->cfg->prep;
    RK_U32 mb_total = ctx->mb_per_frame;
    Vp8eVpuBuf *buffers = (Vp8eVpuBuf *)ctx->buffers;

    //set coding format as VP8
    hw_cfg->coding_type = 1;

    ret = mpp_buffer_group_get_internal(&buffers->hw_buf_grp,
                                        MPP_BUFFER_TYPE_ION);
    if (ret) {
        mpp_err("buf group get failed ret %d\n", ret);
        goto __ERR_RET;
    }

    /* Internal luma reconstruction buffer: 16x16 bytes per macroblock.
     * NOTE(review): the comment says "add 128 bytes" but the code pads by
     * a full SZ_4K page on top of the 4K alignment -- confirm intended. */
    //add 128 bytes to avoid kernel crash
    ret = mpp_buffer_get(buffers->hw_buf_grp, &buffers->hw_luma_buf,
                         MPP_ALIGN(mb_total * (16 * 16), SZ_4K) + SZ_4K);
    if (ret) {
        mpp_err("hw_luma_buf get failed ret %d\n", ret);
        goto __ERR_RET;
    }
    {
        RK_U32 i = 0;
        /* Two interleaved CbCr buffers (2 * 8x8 bytes per macroblock),
         * ping-ponged between write and read reference below. */
        for (i = 0; i < 2; i++) {
            //add 128 bytes to avoid kernel crash
            ret = mpp_buffer_get(buffers->hw_buf_grp, &buffers->hw_cbcr_buf[i],
                                 MPP_ALIGN(mb_total * (2 * 8 * 8), SZ_4K) + SZ_4K);
            if (ret) {
                mpp_err("hw_cbcr_buf[%d] get failed ret %d\n", i, ret);
                goto __ERR_RET;
            }
        }
    }
    /* Luma is shared for write and read; chroma alternates buffer 0/1. */
    hw_cfg->internal_img_lum_base_w = mpp_buffer_get_fd(buffers->hw_luma_buf);
    hw_cfg->internal_img_chr_base_w = mpp_buffer_get_fd(buffers->hw_cbcr_buf[0]);

    hw_cfg->internal_img_lum_base_r[0] = mpp_buffer_get_fd(buffers->hw_luma_buf);
    hw_cfg->internal_img_chr_base_r[0] = mpp_buffer_get_fd(buffers->hw_cbcr_buf[1]);
    {
        /* NAL size table, table size must be 64-bit multiple,
         * space for SEI, MVC prefix, filler and zero at the end of table.
         * At least 1 macroblock row in every slice.
         * Also used for VP8 partitions.
         * NOTE(review): sized by pixel height (pre->height) + 4 entries;
         * macroblock rows would be height/16 -- presumably over-allocated
         * on purpose, confirm against VEPU requirements. */
        RK_U32 size_tbl = MPP_ALIGN(sizeof(RK_U32) * (pre->height + 4), 8);
        ret = mpp_buffer_get(buffers->hw_buf_grp, &buffers->hw_size_table_buf, size_tbl);
        if (ret) {
            mpp_err("hw_size_table_buf get failed ret %d\n", ret);
            goto __ERR_RET;
        }
    }
    {
        /* Probability/context table consumed by the hardware entropy coder. */
        RK_U32 cabac_tbl_size = 8 * 55 + 8 * 96;
        ret = mpp_buffer_get(buffers->hw_buf_grp, &buffers->hw_cabac_table_buf,
                             cabac_tbl_size);
        if (ret) {
            mpp_err("hw_cabac_table_buf get failed\n");
            goto __ERR_RET;
        }
    }
    hw_cfg->cabac_tbl_base = mpp_buffer_get_fd(buffers->hw_cabac_table_buf);

    /* Motion vector output: 4 bytes per macroblock, zeroed before use. */
    ret = mpp_buffer_get(buffers->hw_buf_grp, &buffers->hw_mv_output_buf,
                         mb_total * 4);
    if (ret) {
        mpp_err("hw_mv_output_buf get failed ret %d\n", ret);
        goto __ERR_RET;
    }

    hw_cfg->mv_output_base = mpp_buffer_get_fd(buffers->hw_mv_output_buf);

    memset(mpp_buffer_get_ptr(buffers->hw_mv_output_buf), 0, sizeof(RK_U32) * mb_total);

    ret = mpp_buffer_get(buffers->hw_buf_grp, &buffers->hw_prob_count_buf, VP8_PROB_COUNT_BUF_SIZE);
    if (ret) {
        mpp_err("hw_prob_count_buf get failed ret %d\n", ret);
        goto __ERR_RET;
    }

    hw_cfg->prob_count_base = mpp_buffer_get_fd(buffers->hw_prob_count_buf);
    {
        /* VP8: Segmentation map, 4 bits/mb, 64-bit multiple. */
        RK_U32 segment_map_size = (mb_total * 4 + 63) / 64 * 8;

        ret = mpp_buffer_get(buffers->hw_buf_grp, &buffers->hw_segment_map_buf, segment_map_size);
        if (ret) {
            mpp_err("hw_segment_map_buf get failed ret %d\n", ret);
            goto __ERR_RET;
        }

        hw_cfg->segment_map_base = mpp_buffer_get_fd(buffers->hw_segment_map_buf);
        /* NOTE(review): only segment_map_size / 4 bytes are cleared here,
         * i.e. a quarter of the allocation -- verify this is intentional. */
        memset(mpp_buffer_get_ptr(buffers->hw_segment_map_buf), 0, segment_map_size / 4);

    }
    {
        RK_U32 i = 0;

        /* Wire the reconstruction buffers into the picture pool: one shared
         * luma buffer, two alternating chroma buffers. */
        ctx->picbuf.size = 1;
        for (i = 0; i < 1; i++)
            ctx->picbuf.ref_pic[i].picture.lum = mpp_buffer_get_fd(buffers->hw_luma_buf);
        for (i = 0; i < 2; i++)
            ctx->picbuf.ref_pic[i].picture.cb = mpp_buffer_get_fd(buffers->hw_cbcr_buf[i]);
    }
    {
        /* Output bitstream buffer: half of one YUV420 frame's size. */
        RK_U32 pic_size = MPP_ALIGN(pre->width, 16) * MPP_ALIGN(pre->height, 16) * 3 / 2;
        RK_U32 out_size = pic_size / 2;

        ret = mpp_buffer_get(buffers->hw_buf_grp, &buffers->hw_out_buf, out_size);
        if (ret) {
            mpp_err("hw_out_buf get failed ret %d\n", ret);
            goto __ERR_RET;
        }
    }
    /* Shadow copy of the hardware register file. */
    ctx->regs = mpp_calloc(RK_U32, ctx->reg_size);
    if (!ctx->regs) {
        mpp_err("failed to calloc regs.\n");
        goto __ERR_RET;
    }

    /* -1 means "let the hardware/driver pick the default penalty". */
    hw_cfg->intra_16_favor = -1;
    hw_cfg->prev_mode_favor = -1;
    hw_cfg->inter_favor = -1;
    hw_cfg->skip_penalty = -1;
    hw_cfg->diff_mv_penalty[0] = -1;
    hw_cfg->diff_mv_penalty[1] = -1;
    hw_cfg->diff_mv_penalty[2] = -1;
    hw_cfg->split_penalty[0] = 0;
    hw_cfg->split_penalty[1] = 0;
    hw_cfg->split_penalty[2] = 0x3FF;
    hw_cfg->split_penalty[3] = 0;
    hw_cfg->zero_mv_favor = 0;

    /* 255 disables the intra-area and ROI rectangles. */
    hw_cfg->intra_area_top = hw_cfg->intra_area_bottom = 255;
    hw_cfg->intra_area_left = hw_cfg->intra_area_right = 255;
    hw_cfg->roi1_top = hw_cfg->roi1_bottom = 255;
    hw_cfg->roi1_left = hw_cfg->roi1_right = 255;
    hw_cfg->roi2_top = hw_cfg->roi2_bottom = 255;
    hw_cfg->roi2_left = hw_cfg->roi2_right = 255;

    return ret;

__ERR_RET:
    if (buffers)
        hal_vp8e_buf_free(hal);

    return ret;
}
1352
/*
 * Prepare one frame for hardware encoding: reset per-frame output state,
 * program the input picture addresses/offsets, carve the output buffer
 * into the VP8 partitions, and update reference-picture bookkeeping.
 *
 * Returns MPP_OK; returns early (still MPP_OK) when rate control decided
 * to skip coding this frame (rc->frame_coded == 0).
 */
MPP_RET hal_vp8e_enc_strm_code(void *hal, HalEncTask *task)
{
    HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
    Vp8eHwCfg *hw_cfg = &ctx->hw_cfg;
    VepuOffsetCfg hw_offset;

    MppEncCfgSet *cfg = ctx->cfg;
    MppEncPrepCfg *prep = &cfg->prep;

    /* Reset per-frame output pointers and sizes. */
    {
        RK_U32 i = 0;
        for (i = 0; i < 9; i++) {
            ctx->p_out_buf[i] = NULL;
            ctx->stream_size[i] = 0;
        }
    }

    /* Compute the per-plane byte offsets for the cropped input window. */
    {
        hw_offset.fmt = prep->format;

        hw_offset.width = prep->width;
        hw_offset.height = prep->height;
        hw_offset.hor_stride = prep->hor_stride;
        hw_offset.ver_stride = prep->ver_stride;
        hw_offset.offset_x = mpp_frame_get_offset_x(task->frame);
        hw_offset.offset_y = mpp_frame_get_offset_y(task->frame);

        get_vepu_offset_cfg(&hw_offset);
    }

    /* All three planes share the input fd; planes differ only by offset. */
    {
        HalEncTask *enc_task = task;

        hw_cfg->input_lum_base = mpp_buffer_get_fd(enc_task->input);
        hw_cfg->input_cb_base = hw_cfg->input_lum_base;
        hw_cfg->input_cr_base = hw_cfg->input_cb_base;
        hw_cfg->input_lum_offset = hw_offset.offset_byte[0];
        hw_cfg->input_cb_offset = hw_offset.offset_byte[1];
        hw_cfg->input_cr_offset = hw_offset.offset_byte[2];
    }

    // split memory for vp8 partition
    {
        RK_S32 offset = 0;
        Vp8eVpuBuf *buffers = (Vp8eVpuBuf *)ctx->buffers;
        RK_U8 *p_end = NULL;
        RK_U32 buf_size = mpp_buffer_get_size(buffers->hw_out_buf);
        RK_U32 bus_addr = mpp_buffer_get_fd(buffers->hw_out_buf);
        RK_U8 *p_start = mpp_buffer_get_ptr(buffers->hw_out_buf);

        /* bitbuf[0]: the uncompressed frame tag written by software. */
        p_end = p_start + 3;
        if (ctx->frame_type == VP8E_FRM_KEY)
            p_end += 7;// frame tag len: I frame 10 byte, P frame 3 byte.
        vp8e_set_buffer(&ctx->bitbuf[0], p_start, p_end - p_start);

        offset = p_end - p_start;
        hw_cfg->output_strm_base = bus_addr;
        hw_cfg->output_strm_offset = offset;

        /* bitbuf[1]: compressed frame header, 1/10 of the buffer,
         * rounded down to an 8-byte boundary for the hardware. */
        p_start = p_end;
        p_end = p_start + buf_size / 10;
        p_end = (RK_U8 *)((size_t)p_end & ~0x7);
        vp8e_set_buffer(&ctx->bitbuf[1], p_start, p_end - p_start);

        offset += p_end - p_start;
        hw_cfg->partition_Base[0] = bus_addr;
        hw_cfg->partition_offset[0] = offset;

        /* bitbuf[2]: DCT partition - the remainder of the buffer.
         * NOTE(review): arithmetic on the void * returned by
         * mpp_buffer_get_ptr() relies on the GNU extension treating
         * sizeof(void) as 1 -- a (RK_U8 *) cast would be portable. */
        p_start = p_end;
        p_end = mpp_buffer_get_ptr(buffers->hw_out_buf) + buf_size;
        p_end = (RK_U8 *)((size_t)p_end & ~0x7);
        vp8e_set_buffer(&ctx->bitbuf[2], p_start, p_end - p_start);

        offset += p_end - p_start;
        hw_cfg->partition_Base[1] = bus_addr;
        hw_cfg->partition_offset[1] = offset;
        /* NOTE(review): output_strm_size covers only the last partition
         * span, not the whole output buffer -- confirm against the VEPU
         * register semantics. */
        hw_cfg->output_strm_size = p_end - p_start;

        p_start = p_end;
    }

    /* Reference picture bookkeeping for the current frame. */
    {
        HalVp8ePicBuf *pic_buf = &ctx->picbuf;

        pic_buf->cur_pic->show = 1;
        pic_buf->cur_pic->poc = ctx->frame_cnt;
        pic_buf->cur_pic->i_frame = (ctx->frame_type == VP8E_FRM_KEY);

        init_picbuf(ctx);

        /* P frames refresh all reference slots (ipf/grf/arf) and search
         * every reference list entry. */
        if (ctx->frame_type == VP8E_FRM_P) {
            pic_buf->cur_pic->p_frame = 1;
            pic_buf->cur_pic->arf = 1;
            pic_buf->cur_pic->grf = 1;
            pic_buf->cur_pic->ipf = 1;
            pic_buf->ref_pic_list[0].search = 1;
            pic_buf->ref_pic_list[1].search = 1;
            pic_buf->ref_pic_list[2].search = 1;
        }

        /* Rate control may decide to drop (not code) this frame. */
        if (ctx->rc->frame_coded == 0)
            return MPP_OK;

        if (ctx->rc->golden_picture_rate) {
            pic_buf->cur_pic->grf = 1;
            if (!pic_buf->cur_pic->arf)
                pic_buf->ref_pic_list[1].arf = 1;
        }
    }
    set_frame_params(ctx);
    set_picbuf_ref(ctx);
    set_code_frame(ctx);

    return MPP_OK;
}
1468
hal_vp8e_init_qp_table(void * hal)1469 MPP_RET hal_vp8e_init_qp_table(void *hal)
1470 {
1471 RK_S32 i = 0, j = 0;
1472 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
1473
1474 for (i = 0; i < QINDEX_RANGE; i++) {
1475 RK_S32 tmp = 0;
1476 Vp8eQp * qp = &ctx->qp_y1[i];
1477
1478 for (j = 0; j < 2; j++) {
1479 if (j == 0) {
1480 tmp = dc_q_lookup_tbl[MPP_CLIP3(0, QINDEX_RANGE - 1, i)];
1481 } else {
1482 tmp = ac_q_lookup_tbl[MPP_CLIP3(0, QINDEX_RANGE - 1, i)];
1483 }
1484
1485 qp->quant[j] = MPP_MIN((1 << 16) / tmp, 0x3FFF);
1486 qp->zbin[j] = ((q_zbin_factors_tbl[i] * tmp) + 64) >> 7;
1487 qp->round[j] = (q_rounding_factors_tbl[i] * tmp) >> 7;
1488 qp->dequant[j] = tmp;
1489 }
1490
1491 qp = &ctx->qp_y2[i];
1492 for (j = 0; j < 2; j++) {
1493 if (j == 0) {
1494 tmp = dc_q_lookup_tbl[MPP_CLIP3(0, QINDEX_RANGE - 1, i)];
1495 tmp = tmp * 2;
1496 } else {
1497 tmp = ac_q_lookup_tbl[MPP_CLIP3(0, QINDEX_RANGE - 1, i)];
1498 tmp = (tmp * 155) / 100;
1499 if (tmp < 8)
1500 tmp = 8;
1501 }
1502 qp->quant[j] = MPP_MIN((1 << 16) / tmp, 0x3FFF);
1503 qp->zbin[j] = ((q_zbin_factors_tbl[i] * tmp) + 64) >> 7;
1504 qp->round[j] = (q_rounding_factors_tbl[i] * tmp) >> 7;
1505 qp->dequant[j] = tmp;
1506 }
1507
1508 qp = &ctx->qp_ch[i];
1509 for (j = 0; j < 2; j++) {
1510 if (j == 0) {
1511 tmp = dc_q_lookup_tbl[MPP_CLIP3(0, QINDEX_RANGE - 1, i)];
1512 if (tmp > 132)
1513 tmp = 132;
1514 } else {
1515 tmp = ac_q_lookup_tbl[MPP_CLIP3(0, QINDEX_RANGE - 1, i)];
1516 }
1517 qp->quant[j] = MPP_MIN((1 << 16) / tmp, 0x3FFF);
1518 qp->zbin[j] = ((q_zbin_factors_tbl[i] * tmp) + 64) >> 7;
1519 qp->round[j] = (q_rounding_factors_tbl[i] * tmp) >> 7;
1520 qp->dequant[j] = tmp;
1521 }
1522 }
1523
1524 return MPP_OK;
1525 }
1526
hal_vp8e_update_buffers(void * hal,HalEncTask * task)1527 MPP_RET hal_vp8e_update_buffers(void *hal, HalEncTask *task)
1528 {
1529 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
1530 Vp8eVpuBuf *buffers = (Vp8eVpuBuf *)ctx->buffers;
1531 const RK_U32 hw_offset = ctx->hw_cfg.first_free_bit / 8;
1532 RK_U32 *part = (RK_U32 *)mpp_buffer_get_ptr(buffers->hw_size_table_buf);
1533
1534 ctx->bitbuf[1].byte_cnt += part[0] - hw_offset;
1535 ctx->bitbuf[1].data += part[0] - hw_offset;
1536
1537 ctx->bitbuf[2].byte_cnt = part[1];
1538 ctx->bitbuf[2].data += part[1];
1539 ctx->bitbuf[3].byte_cnt = part[2];
1540 ctx->bitbuf[3].data += part[2];
1541
1542 set_frame_tag(ctx);
1543
1544 if (vp8e_buffer_gap(&ctx->bitbuf[1], 4) == MPP_OK) {
1545 set_data_part_size(ctx);
1546 }
1547
1548 ctx->prev_frame_lost = 0;
1549
1550 ctx->p_out_buf[0] = (RK_U32 *)ctx->bitbuf[0].p_data;
1551 ctx->p_out_buf[1] = (RK_U32 *)ctx->bitbuf[2].p_data;
1552 if (ctx->sps.dct_partitions)
1553 ctx->p_out_buf[2] = (RK_U32 *)ctx->bitbuf[3].p_data;
1554
1555 ctx->stream_size[0] = ctx->bitbuf[0].byte_cnt +
1556 ctx->bitbuf[1].byte_cnt;
1557 ctx->stream_size[1] = ctx->bitbuf[2].byte_cnt;
1558
1559 if (ctx->sps.dct_partitions)
1560 ctx->stream_size[2] = ctx->bitbuf[3].byte_cnt;
1561
1562 ctx->frame_size = ctx->stream_size[0] + ctx->stream_size[1] +
1563 ctx->stream_size[2];
1564
1565 update_picbuf(&ctx->picbuf);
1566 {
1567 HalEncTask *enc_task = task;
1568 RK_U8 *p_out = mpp_buffer_get_ptr(enc_task->output);
1569 RK_S32 disable_ivf = ctx->cfg->codec.vp8.disable_ivf;
1570
1571 if (!disable_ivf) {
1572 p_out += enc_task->length;
1573
1574 if (ctx->frame_size) {
1575 write_ivf_frame(ctx, p_out);
1576
1577 p_out += IVF_FRM_BYTES;
1578 enc_task->length += IVF_FRM_BYTES;
1579 }
1580 }
1581
1582 memcpy(p_out, ctx->p_out_buf[0], ctx->stream_size[0]);
1583 p_out += ctx->stream_size[0];
1584 enc_task->length += ctx->stream_size[0];
1585
1586 memcpy(p_out, ctx->p_out_buf[1], ctx->stream_size[1]);
1587 p_out += ctx->stream_size[1];
1588 enc_task->length += ctx->stream_size[1];
1589
1590 memcpy(p_out, ctx->p_out_buf[2], ctx->stream_size[2]);
1591 p_out += ctx->stream_size[2];
1592 enc_task->length += ctx->stream_size[2];
1593 }
1594 return MPP_OK;
1595 }
1596
hal_vp8e_setup(void * hal)1597 MPP_RET hal_vp8e_setup(void *hal)
1598 {
1599 MPP_RET ret = MPP_OK;
1600 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
1601
1602 if (set_parameter(ctx)) {
1603 mpp_err("set vp8e parameter failed");
1604 return MPP_NOK;
1605 }
1606
1607 if (set_picbuf(ctx)) {
1608 mpp_err("set vp8e picbuf failed, no enough memory");
1609 return MPP_ERR_NOMEM;
1610 }
1611
1612 ret = alloc_buffer(ctx);
1613
1614 return ret;
1615 }
1616
hal_vp8e_buf_free(void * hal)1617 MPP_RET hal_vp8e_buf_free(void *hal)
1618 {
1619 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
1620 Vp8eVpuBuf *buffers = (Vp8eVpuBuf *)ctx->buffers;
1621
1622 if (buffers->hw_luma_buf) {
1623 mpp_buffer_put(buffers->hw_luma_buf);
1624 buffers->hw_luma_buf = NULL;
1625 }
1626
1627 {
1628 RK_U32 i = 0;
1629 for (i = 0; i < 2; i++) {
1630 if (buffers->hw_cbcr_buf[i]) {
1631 mpp_buffer_put(buffers->hw_cbcr_buf[i]);
1632 buffers->hw_cbcr_buf[i] = NULL;
1633 }
1634 }
1635 }
1636
1637 if (buffers->hw_size_table_buf) {
1638 mpp_buffer_put(buffers->hw_size_table_buf);
1639 buffers->hw_size_table_buf = NULL;
1640 }
1641
1642 if (buffers->hw_cabac_table_buf) {
1643 mpp_buffer_put(buffers->hw_cabac_table_buf);
1644 buffers->hw_cabac_table_buf = NULL;
1645 }
1646
1647 if (buffers->hw_mv_output_buf) {
1648 mpp_buffer_put(buffers->hw_mv_output_buf);
1649 buffers->hw_mv_output_buf = NULL;
1650 }
1651
1652 if (buffers->hw_prob_count_buf) {
1653 mpp_buffer_put(buffers->hw_prob_count_buf);
1654 buffers->hw_prob_count_buf = NULL;
1655 }
1656
1657 if (buffers->hw_segment_map_buf) {
1658 mpp_buffer_put(buffers->hw_segment_map_buf);
1659 buffers->hw_segment_map_buf = NULL;
1660 }
1661
1662 if (buffers->hw_out_buf) {
1663 mpp_buffer_put(buffers->hw_out_buf);
1664 buffers->hw_out_buf = NULL;
1665 }
1666
1667 if (buffers->hw_buf_grp) {
1668 mpp_buffer_group_put(buffers->hw_buf_grp);
1669 buffers->hw_buf_grp = NULL;
1670 }
1671
1672 return MPP_OK;
1673 }
1674