1 /*
2 * Copyright 2017 Rockchip Electronics Co. LTD
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define MODULE_TAG "hal_vp8e_base"
18
19 #include <string.h>
20
21 #include "mpp_mem.h"
22 #include "mpp_buffer.h"
23 #include "mpp_common.h"
24
25 #include "hal_vp8e_base.h"
26 #include "hal_vp8e_putbit.h"
27 #include "hal_vp8e_table.h"
28 #include "hal_vp8e_debug.h"
29
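/*
 * Seed all four segment QPs with the frame-level QP from rate control and
 * all four segment loop-filter levels with the current SPS filter level.
 */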
30 static MPP_RET set_frame_params(void *hal)
31 {
32
33 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
34 {
35 RK_S32 i;
36 Pps *pps = ctx->ppss.pps;
37 Vp8eSps *sps = &ctx->sps;
38
39 for (i = 0; i < 4; i++) {
40 pps->qp_sgm[i] = ctx->rc->qp_hdr;
41 pps->level_sgm[i] = sps->filter_level;
42 }
43 }
44
45 return MPP_OK;
46 }
47
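/*
 * Derive the loop-filter configuration for the current frame. With automatic
 * filter level enabled the level is computed from QP (a linear mapping for key
 * frames, an inter_level_tbl lookup otherwise), both for the frame and for each
 * segment. The per-reference and per-mode filter deltas are then defaulted and
 * clipped to [-63, 63]. The early MPP_NOK returns only signal that no delta
 * update is needed; the caller ignores the return value.
 */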
48 static MPP_RET set_filter(void *hal)
49 {
50 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
51 Vp8eSps *sps = &ctx->sps;
52
53 if (sps->auto_filter_level) {
54 RK_U32 qp = 36;
55 Pps *p_pps = ctx->ppss.pps;
56 if (ctx->frame_type == VP8E_FRM_KEY) {
57 RK_S32 tmp = (qp * 64) / 128 + 8;
58 sps->filter_level = MPP_CLIP3(0, 63, tmp);
59 p_pps->level_sgm[0] = MPP_CLIP3(0, 63, (p_pps->qp_sgm[0] * 64) / 128 + 8);
60 p_pps->level_sgm[1] = MPP_CLIP3(0, 63, (p_pps->qp_sgm[1] * 64) / 128 + 8);
61 p_pps->level_sgm[2] = MPP_CLIP3(0, 63, (p_pps->qp_sgm[2] * 64) / 128 + 8);
62 p_pps->level_sgm[3] = MPP_CLIP3(0, 63, (p_pps->qp_sgm[3] * 64) / 128 + 8);
63 } else {
64 sps->filter_level = inter_level_tbl[qp];
65 p_pps->level_sgm[0] = inter_level_tbl[p_pps->qp_sgm[0]];
66 p_pps->level_sgm[1] = inter_level_tbl[p_pps->qp_sgm[1]];
67 p_pps->level_sgm[2] = inter_level_tbl[p_pps->qp_sgm[2]];
68 p_pps->level_sgm[3] = inter_level_tbl[p_pps->qp_sgm[3]];
69 }
70 }
71
72 if (sps->auto_filter_sharpness) {
73 sps->filter_sharpness = 0;
74 }
75
76 if (!sps->filter_delta_enable)
77 return MPP_NOK;
78
79 if (sps->filter_delta_enable == 2) {
80 sps->filter_delta_enable = 1;
81 return MPP_NOK;
82 }
83
84 if (sps->filter_level == 0) {
85 sps->ref_delta[0] = 0; /* Intra frame */
86 sps->ref_delta[1] = 0; /* Last frame */
87 sps->ref_delta[2] = 0; /* Golden frame */
88 sps->ref_delta[3] = 0; /* Altref frame */
89 sps->mode_delta[0] = 0; /* BPRED */
90 sps->mode_delta[1] = 0; /* Zero */
91 sps->mode_delta[2] = 0; /* New mv */
92 sps->mode_delta[3] = 0; /* Split mv */
93 return MPP_NOK;
94 }
95
96 if (!ctx->picbuf.cur_pic->ipf && !ctx->picbuf.cur_pic->grf &&
97 !ctx->picbuf.cur_pic->arf) {
98 memcpy(sps->ref_delta, sps->old_ref_delta, sizeof(sps->ref_delta));
99 memcpy(sps->mode_delta, sps->old_mode_delta, sizeof(sps->mode_delta));
100 return MPP_NOK;
101 }
102
103 sps->ref_delta[0] = 2; /* Intra frame */
104 sps->ref_delta[1] = 0; /* Last frame */
105 sps->ref_delta[2] = -2; /* Golden frame */
106 sps->ref_delta[3] = -2; /* Altref frame */
107
108 sps->mode_delta[0] = 4; /* BPRED */
109 sps->mode_delta[1] = -2; /* Zero */
110 sps->mode_delta[2] = 2; /* New mv */
111 sps->mode_delta[3] = 4; /* Split mv */
112
113 {
114 RK_U32 i = 0;
115 for (i = 0; i < 4; i++) {
116 sps->ref_delta[i] = MPP_CLIP3(-0x3f, 0x3f, sps->ref_delta[i]);
117 sps->mode_delta[i] = MPP_CLIP3(-0x3f, 0x3f, sps->mode_delta[i]);
118 }
119 }
120 return MPP_OK;
121 }
122
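/*
 * Apply the ROI1/ROI2 delta QPs to segments 1/2, rebuild the 4-bit-per-MB
 * segment map in hw_segment_map_buf (endian-swapped for the hardware) and
 * count the macroblocks per segment id. On intra frames or when segmentation
 * is disabled the cached per-segment QP/level values are invalidated.
 */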
123 static MPP_RET set_segmentation(void *hal)
124 {
125 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
126
127 Vp8ePps *ppss = &ctx->ppss;
128 Pps *pps = ppss->pps;
129 Vp8eHwCfg *hw_cfg = &ctx->hw_cfg;
130 Vp8eVpuBuf *buffers = (Vp8eVpuBuf *)ctx->buffers;
131
132 {
133 RK_S32 qp = ctx->rc->qp_hdr;
134
135 if (hw_cfg->roi1_delta_qp)
136 pps->qp_sgm[1] = MPP_CLIP3(0, 127, qp - hw_cfg->roi1_delta_qp);
137
138 if (hw_cfg->roi2_delta_qp)
139 pps->qp_sgm[2] = MPP_CLIP3(0, 127, qp - hw_cfg->roi2_delta_qp);
140 }
141
142 {
143 RK_U32 x, y, mb, mask, id;
144 RK_U32 *map = mpp_buffer_get_ptr(buffers->hw_segment_map_buf);
145 RK_U32 *map_bck = map;
146 RK_U32 mapSize = (ctx->mb_per_frame + 15) / 16 * 8;
147
148 mpp_buffer_sync_begin(buffers->hw_segment_map_buf);
149
150 if (hw_cfg->roi1_delta_qp || hw_cfg->roi2_delta_qp) {
151 pps->segment_enabled = 1;
152
153 memset(pps->sgm.id_cnt, 0, sizeof(pps->sgm.id_cnt));
154
155 for (y = 0, mb = 0, mask = 0; y < ctx->mb_per_col; y++) {
156 for (x = 0; x < ctx->mb_per_row; x++) {
157 id = 0;
158 if ((x >= hw_cfg->roi1_left) && (x <= hw_cfg->roi1_right) &&
159 (y >= hw_cfg->roi1_top) && (y <= hw_cfg->roi1_bottom))
160 id = 1;
161 if ((x >= hw_cfg->roi2_left) && (x <= hw_cfg->roi2_right) &&
162 (y >= hw_cfg->roi2_top) && (y <= hw_cfg->roi2_bottom))
163 id = 2;
164
165 pps->sgm.id_cnt[id]++;
166
167 mask |= id << (28 - 4 * (mb % 8));
168 if ((mb % 8) == 7) {
169 *map++ = mask;
170 mask = 0;
171 }
172 mb++;
173 }
174 }
175 *map++ = mask;
176 vp8e_swap_endian(map_bck, mapSize);
177 } else if (pps->segment_enabled && pps->sgm.map_modified) {
178 memset(pps->sgm.id_cnt, 0, sizeof(pps->sgm.id_cnt));
179
180 for (mb = 0, mask = 0; mb < mapSize / 4; mb++) {
181 mask = map[mb];
182 for (x = 0; x < 8; x++) {
183 if (mb * 8 + x < ctx->mb_per_frame) {
184 id = (mask >> (28 - 4 * x)) & 0xF;
185 pps->sgm.id_cnt[id]++;
186 }
187 }
188 }
189 vp8e_swap_endian(map_bck, mapSize);
190 }
191
192 mpp_buffer_sync_end(buffers->hw_segment_map_buf);
193 }
194 if (ctx->picbuf.cur_pic->i_frame || !pps->segment_enabled) {
195 memset(ppss->qp_sgm, 0xff, sizeof(ppss->qp_sgm));
196 memset(ppss->level_sgm, 0xff, sizeof(ppss->level_sgm));
197 ppss->prev_pps = NULL;
198 } else
199 ppss->prev_pps = ppss->pps;
200
201 return MPP_OK;
202 }
203
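/* Scale the intra 4x4 and 16x16 mode tree penalties by QP and default the
 * intra 16x16 favor value when it has not been set explicitly. */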
204 static void set_intra_pred_penalties(Vp8eHwCfg *hw_cfg, RK_U32 qp)
205 {
206 RK_S32 i, tmp;
207
208 /* Intra 4x4 mode */
209 tmp = qp * 2 + 8;
210 for (i = 0; i < 10; i++) {
211 hw_cfg->intra_b_mode_penalty[i] = (intra4_mode_tree_penalty_tbl[i] * tmp) >> 8;
212 }
213
214 /* Intra 16x16 mode */
215 tmp = qp * 2 + 64;
216 for (i = 0; i < 4; i++) {
217 hw_cfg->intra_mode_penalty[i] = (intra16_mode_tree_penalty_tbl[i] * tmp) >> 8;
218 }
219
220 /* If favor has not been set earlier (e.g. by testId), use the default */
221 if (hw_cfg->intra_16_favor == -1)
222 hw_cfg->intra_16_favor = qp * 1024 / 128;
223 }
224
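/*
 * Write the segmentation syntax of the frame header: map/data update flags,
 * absolute per-segment QP and filter-level values, and the segment tree
 * probabilities derived from the per-segment macroblock counts.
 */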
225 static void set_hdr_segmentation(Vp8ePutBitBuf *bitbuf, Vp8ePps *ppss,
226 Vp8eHalEntropy *entropy)
227 {
228 RK_S32 i, tmp;
229 RK_U8 data_modified = 0;
230
231 Pps *pps = ppss->pps;
232 Sgm *sgm = &ppss->pps->sgm;
233
234 if (memcmp(ppss->qp_sgm, pps->qp_sgm, sizeof(ppss->qp_sgm)))
235 data_modified = 1;
236
237 if (memcmp(ppss->level_sgm, pps->level_sgm, sizeof(ppss->level_sgm)))
238 data_modified = 1;
239
240 if (!ppss->prev_pps) {
241 sgm->map_modified = 1;
242 }
243
244 vp8e_put_lit(bitbuf, sgm->map_modified, 1);
245 vp8e_put_lit(bitbuf, data_modified, 1);
246
247 if (data_modified) {
248 vp8e_put_lit(bitbuf, 1, 1);
249
250 for (i = 0; i < SGM_CNT; i++) {
251 tmp = pps->qp_sgm[i];
252 vp8e_put_lit(bitbuf, 1, 1);
253 vp8e_put_lit(bitbuf, MPP_ABS(tmp), 7);
254 vp8e_put_lit(bitbuf, tmp < 0, 1);
255 }
256
257 for (i = 0; i < SGM_CNT; i++) {
258 tmp = pps->level_sgm[i];
259 vp8e_put_lit(bitbuf, 1, 1);
260 vp8e_put_lit(bitbuf, MPP_ABS(tmp), 6);
261 vp8e_put_lit(bitbuf, tmp < 0, 1);
262 }
263 }
264
265 if (sgm->map_modified) {
266 RK_S32 sum1 = sgm->id_cnt[0] + sgm->id_cnt[1];
267 RK_S32 sum2 = sgm->id_cnt[2] + sgm->id_cnt[3];
268
269 tmp = 255 * sum1 / (sum1 + sum2);
270 entropy->segment_prob[0] = MPP_CLIP3(1, 255, tmp);
271
272 tmp = sum1 ? 255 * sgm->id_cnt[0] / sum1 : 255;
273 entropy->segment_prob[1] = MPP_CLIP3(1, 255, tmp);
274
275 tmp = sum2 ? 255 * sgm->id_cnt[2] / sum2 : 255;
276 entropy->segment_prob[2] = MPP_CLIP3(1, 255, tmp);
277
278 for (i = 0; i < 3; i++) {
279 if (sgm->id_cnt[i] != 0) {
280 vp8e_put_lit(bitbuf, 1, 1);
281 vp8e_put_lit(bitbuf, entropy->segment_prob[i], 8);
282 } else {
283 vp8e_put_lit(bitbuf, 0, 1);
284 }
285 }
286 }
287
288 memcpy(ppss->qp_sgm, pps->qp_sgm, sizeof(ppss->qp_sgm));
289 memcpy(ppss->level_sgm, pps->level_sgm, sizeof(ppss->level_sgm));
290 }
291
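/* Write the loop-filter reference and mode delta updates when any of them
 * changed (or entropy is not refreshed), then remember the written values. */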
292 static void set_filter_level_delta(Vp8ePutBitBuf *bitbuf, Vp8eSps *sps)
293 {
294 RK_S32 i, tmp;
295 RK_U8 update = 0;
296 RK_S32 mode_update[4];
297 RK_S32 ref_update[4];
298
299 for (i = 0; i < 4; i++) {
300 mode_update[i] = sps->mode_delta[i] != sps->old_mode_delta[i];
301 ref_update[i] = sps->ref_delta[i] != sps->old_ref_delta[i];
302 if (mode_update[i] || ref_update[i])
303 update = 1;
304 }
305
306 if (!sps->refresh_entropy)
307 update = 1;
308
309 vp8e_put_lit(bitbuf, update, 1);
310 if (!update)
311 return;
312
313 for (i = 0; i < 4; i++) {
314 vp8e_put_lit(bitbuf, ref_update[i], 1);
315 if (ref_update[i]) {
316 tmp = sps->ref_delta[i];
317 vp8e_put_lit(bitbuf, MPP_ABS(tmp), 6);
318 vp8e_put_lit(bitbuf, tmp < 0, 1);
319 }
320 }
321
322 for (i = 0; i < 4; i++) {
323 vp8e_put_lit(bitbuf, mode_update[i], 1);
324 if (mode_update[i]) {
325 tmp = sps->mode_delta[i];
326 vp8e_put_lit(bitbuf, MPP_ABS(tmp), 6);
327 vp8e_put_lit(bitbuf, tmp < 0, 1);
328 }
329 }
330
331 memcpy(sps->old_ref_delta, sps->ref_delta, sizeof(sps->ref_delta));
332 memcpy(sps->old_mode_delta, sps->mode_delta, sizeof(sps->mode_delta));
333 }
334
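/*
 * Write the VP8 frame header fields carried in the first (boolean coded)
 * partition into bitbuf[1]: color/clamp type on key frames, segmentation,
 * loop filter parameters, DCT partition count, base QP and zero QP deltas,
 * reference refresh/copy flags and sign biases on inter frames, coefficient
 * probability updates, mb_no_coeff_skip, and for inter frames the
 * intra/last/golden probabilities plus MV probability updates.
 */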
335 static MPP_RET set_frame_header(void *hal)
336 {
337 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
338
339 Vp8eSps *sps = &ctx->sps;
340 Pps *pps = ctx->ppss.pps;
341
342 Vp8ePutBitBuf *bitbuf = &ctx->bitbuf[1];
343 HalVp8eRefPic *cur_pic = ctx->picbuf.cur_pic;
344 Vp8eHalEntropy *entropy = &ctx->entropy;
345
346 if (cur_pic->i_frame) {
347 vp8e_put_lit(bitbuf, sps->color_type, 1);
348
349 vp8e_put_lit(bitbuf, sps->clamp_type, 1);
350 }
351
352 vp8e_put_lit(bitbuf, pps->segment_enabled, 1);
353 if (pps->segment_enabled)
354 set_hdr_segmentation(bitbuf, &ctx->ppss, entropy);
355
356 vp8e_put_lit(bitbuf, sps->filter_type, 1);
357
358 vp8e_put_lit(bitbuf, sps->filter_level, 6);
359
360 vp8e_put_lit(bitbuf, sps->filter_sharpness, 3);
361
362 vp8e_put_lit(bitbuf, sps->filter_delta_enable, 1);
363 if (sps->filter_delta_enable) {
364 /* Filter level delta references reset by key frame */
365 if (cur_pic->i_frame) {
366 memset(sps->old_ref_delta, 0, sizeof(sps->ref_delta));
367 memset(sps->old_mode_delta, 0, sizeof(sps->mode_delta));
368 }
369 set_filter_level_delta(bitbuf, sps);
370 }
371
372 vp8e_put_lit(bitbuf, sps->dct_partitions, 2);
373
374 vp8e_put_lit(bitbuf, ctx->rc->qp_hdr, 7);
375
376 vp8e_put_lit(bitbuf, 0, 1);
377 vp8e_put_lit(bitbuf, 0, 1);
378 vp8e_put_lit(bitbuf, 0, 1);
379 vp8e_put_lit(bitbuf, 0, 1);
380 vp8e_put_lit(bitbuf, 0, 1);
381
382 if (!cur_pic->i_frame) {
383 HalVp8eRefPic *ref_pic_list = ctx->picbuf.ref_pic_list;
384
385 vp8e_put_lit(bitbuf, cur_pic->grf, 1); /* Grf refresh */
386 vp8e_put_lit(bitbuf, cur_pic->arf, 1); /* Arf refresh */
387
388 if (!cur_pic->grf) {
389 if (ref_pic_list[0].grf) {
390 vp8e_put_lit(bitbuf, 1, 2); /* Ipf -> grf */
391 } else if (ref_pic_list[2].grf) {
392 vp8e_put_lit(bitbuf, 2, 2); /* Arf -> grf */
393 } else {
394 vp8e_put_lit(bitbuf, 0, 2); /* Not updated */
395 }
396 }
397
398 if (!cur_pic->arf) {
399 if (ref_pic_list[0].arf) {
400 vp8e_put_lit(bitbuf, 1, 2); /* Ipf -> arf */
401 } else if (ref_pic_list[1].arf) {
402 vp8e_put_lit(bitbuf, 2, 2); /* Grf -> arf */
403 } else {
404 vp8e_put_lit(bitbuf, 0, 2); /* Not updated */
405 }
406 }
407
408 vp8e_put_lit(bitbuf, sps->sing_bias[1], 1); /* Grf */
409 vp8e_put_lit(bitbuf, sps->sing_bias[2], 1); /* Arf */
410 }
411
412 vp8e_put_lit(bitbuf, sps->refresh_entropy, 1);
413 if (!cur_pic->i_frame) {
414 vp8e_put_lit(bitbuf, cur_pic->ipf, 1);
415 }
416 vp8e_calc_coeff_prob(bitbuf, &entropy->coeff_prob, &entropy->old_coeff_prob);
417 vp8e_put_lit(bitbuf, 1, 1);
418 vp8e_put_lit(bitbuf, entropy->skip_false_prob, 8);
419
420 if (cur_pic->i_frame)
421 return MPP_OK;
422
423 vp8e_put_lit(bitbuf, entropy->intra_prob, 8);
424 vp8e_put_lit(bitbuf, entropy->last_prob, 8);
425 vp8e_put_lit(bitbuf, entropy->gf_prob, 8);
426 vp8e_put_lit(bitbuf, 0, 1);
427 vp8e_put_lit(bitbuf, 0, 1);
428 vp8e_calc_mv_prob(bitbuf, &entropy->mv_prob, &entropy->old_mv_prob);
429
430 return MPP_OK;
431 }
432
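/*
 * Fill the hardware configuration for the upcoming frame: align the output
 * stream offset and pass the header bits that share the first 64-bit word
 * with the hardware output (strm_start_msb/lsb), pick the 1/4-pel and split
 * MV modes based on frame size, derive inter/skip/golden/split penalties and
 * the DMV penalty tables from QP and the MV probabilities, load the
 * per-segment quantizer and filter-level tables, and hand over the boolean
 * encoder state to the hardware.
 */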
433 static MPP_RET set_new_frame(void *hal)
434 {
435 RK_S32 i;
436 RK_S32 qp;
437 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
438 Vp8eSps *sps = &ctx->sps;
439 Vp8eHwCfg *hw_cfg = &ctx->hw_cfg;
440 Vp8eVpuBuf *buffers = (Vp8eVpuBuf *)ctx->buffers;
441
442 hw_cfg->output_strm_size /= 8;
443 hw_cfg->output_strm_size &= (~0x07);
444
445 {
446 hw_cfg->output_strm_offset += ctx->bitbuf[1].byte_cnt;
447 hw_cfg->first_free_bit = (hw_cfg->output_strm_offset & 0x07) * 8;
448 }
449
450 if (hw_cfg->first_free_bit != 0) {
451 RK_U32 val;
452 RK_U8 *pTmp = (RK_U8 *)((size_t)(ctx->bitbuf[1].data) & (~0x07));
453
454 for (val = 6; val >= hw_cfg->first_free_bit / 8; val--) {
455 pTmp[val] = 0;
456 }
457
458 val = pTmp[0] << 24;
459 val |= pTmp[1] << 16;
460 val |= pTmp[2] << 8;
461 val |= pTmp[3];
462
463 hw_cfg->strm_start_msb = val;
464
465 if (hw_cfg->first_free_bit > 32) {
466 val = pTmp[4] << 24;
467 val |= pTmp[5] << 16;
468 val |= pTmp[6] << 8;
469
470 hw_cfg->strm_start_lsb = val;
471 } else {
472 hw_cfg->strm_start_lsb = 0;
473 }
474 } else {
475 hw_cfg->strm_start_msb = hw_cfg->strm_start_lsb = 0;
476 }
477
478 if (sps->quarter_pixel_mv == 0) {
479 hw_cfg->disable_qp_mv = 1;
480 } else if (sps->quarter_pixel_mv == 1) {
481 if (ctx->mb_per_frame > 8160)
482 hw_cfg->disable_qp_mv = 1;
483 else
484 hw_cfg->disable_qp_mv = 0;
485 } else {
486 hw_cfg->disable_qp_mv = 0;
487 }
488
489 hw_cfg->enable_cabac = 1;
490
491 if (sps->split_mv == 0)
492 hw_cfg->split_mv_mode = 0;
493 else if (sps->split_mv == 1) {
494 if (ctx->mb_per_frame > 1584)
495 hw_cfg->split_mv_mode = 0;
496 else
497 hw_cfg->split_mv_mode = 1;
498 } else
499 hw_cfg->split_mv_mode = 1;
500
501 qp = ctx->rc->qp_hdr;
502 if (hw_cfg->inter_favor == -1) {
503 RK_S32 tmp = 128 - ctx->entropy.intra_prob;
504
505 if (tmp < 0) {
506 hw_cfg->inter_favor = tmp & 0xFFFF;
507 } else {
508 tmp = qp * 2 - 40;
509 hw_cfg->inter_favor = MPP_MAX(0, tmp);
510 }
511 }
512
513 if (hw_cfg->diff_mv_penalty[0] == -1)
514 hw_cfg->diff_mv_penalty[0] = 64 / 2;
515 if (hw_cfg->diff_mv_penalty[1] == -1)
516 hw_cfg->diff_mv_penalty[1] = 60 / 2 * 32;
517 if (hw_cfg->diff_mv_penalty[2] == -1)
518 hw_cfg->diff_mv_penalty[2] = 8;
519 if (hw_cfg->skip_penalty == -1)
520 hw_cfg->skip_penalty = (qp >= 100) ? (3 * qp / 4) : 0; /* Zero/nearest/near */
521 if (hw_cfg->golden_penalty == -1)
522 hw_cfg->golden_penalty = MPP_MAX(0, 5 * qp / 4 - 10);
523 if (hw_cfg->split_penalty[0] == 0)
524 hw_cfg->split_penalty[0] = MPP_MIN(1023, vp8_split_penalty_tbl[qp] / 2);
525 if (hw_cfg->split_penalty[1] == 0)
526 hw_cfg->split_penalty[1] = MPP_MIN(1023, (2 * vp8_split_penalty_tbl[qp] + 40) / 4);
527 if (hw_cfg->split_penalty[3] == 0)
528 hw_cfg->split_penalty[3] = MPP_MIN(511, (8 * vp8_split_penalty_tbl[qp] + 500) / 16);
529
530 for (i = 0; i < 128; i++) {
531 RK_S32 y, x;
532
533 hw_cfg->dmv_penalty[i] = i * 2;
534 y = vp8e_calc_cost_mv(i * 2, ctx->entropy.mv_prob[0]); /* mv y */
535 x = vp8e_calc_cost_mv(i * 2, ctx->entropy.mv_prob[1]); /* mv x */
536 hw_cfg->dmv_qpel_penalty[i] = MPP_MIN(255, (y + x + 1) / 2 * weight_tbl[qp] >> 8);
537 }
538
539 for (i = 0; i < 4; i++) {
540 qp = ctx->ppss.pps->qp_sgm[i];
541 hw_cfg->y1_quant_dc[i] = ctx->qp_y1[qp].quant[0];
542 hw_cfg->y1_quant_ac[i] = ctx->qp_y1[qp].quant[1];
543 hw_cfg->y2_quant_dc[i] = ctx->qp_y2[qp].quant[0];
544 hw_cfg->y2_quant_ac[i] = ctx->qp_y2[qp].quant[1];
545 hw_cfg->ch_quant_dc[i] = ctx->qp_ch[qp].quant[0];
546 hw_cfg->ch_quant_ac[i] = ctx->qp_ch[qp].quant[1];
547 hw_cfg->y1_zbin_dc[i] = ctx->qp_y1[qp].zbin[0];
548 hw_cfg->y1_zbin_ac[i] = ctx->qp_y1[qp].zbin[1];
549 hw_cfg->y2_zbin_dc[i] = ctx->qp_y2[qp].zbin[0];
550 hw_cfg->y2_zbin_ac[i] = ctx->qp_y2[qp].zbin[1];
551 hw_cfg->ch_zbin_dc[i] = ctx->qp_ch[qp].zbin[0];
552 hw_cfg->ch_zbin_ac[i] = ctx->qp_ch[qp].zbin[1];
553 hw_cfg->y1_round_dc[i] = ctx->qp_y1[qp].round[0];
554 hw_cfg->y1_round_ac[i] = ctx->qp_y1[qp].round[1];
555 hw_cfg->y2_round_dc[i] = ctx->qp_y2[qp].round[0];
556 hw_cfg->y2_round_ac[i] = ctx->qp_y2[qp].round[1];
557 hw_cfg->ch_round_dc[i] = ctx->qp_ch[qp].round[0];
558 hw_cfg->ch_round_ac[i] = ctx->qp_ch[qp].round[1];
559 hw_cfg->y1_dequant_dc[i] = ctx->qp_y1[qp].dequant[0];
560 hw_cfg->y1_dequant_ac[i] = ctx->qp_y1[qp].dequant[1];
561 hw_cfg->y2_dequant_dc[i] = ctx->qp_y2[qp].dequant[0];
562 hw_cfg->y2_dequant_ac[i] = ctx->qp_y2[qp].dequant[1];
563 hw_cfg->ch_dequant_dc[i] = ctx->qp_ch[qp].dequant[0];
564 hw_cfg->ch_dequant_ac[i] = ctx->qp_ch[qp].dequant[1];
565
566 hw_cfg->filter_level[i] = ctx->ppss.pps->level_sgm[i];
567 }
568
569 hw_cfg->bool_enc_value = ctx->bitbuf[1].bottom;
570 hw_cfg->bool_enc_value_bits = 24 - ctx->bitbuf[1].bits_left;
571 hw_cfg->bool_enc_range = ctx->bitbuf[1].range;
572
573 if (ctx->picbuf.cur_pic->i_frame)
574 hw_cfg->frame_coding_type = 1;
575 else
576 hw_cfg->frame_coding_type = 0;
577
578 hw_cfg->size_tbl_base = mpp_buffer_get_fd(buffers->hw_size_table_buf);
579
580 hw_cfg->dct_partitions = sps->dct_partitions;
581 hw_cfg->filter_disable = sps->filter_type;
582 hw_cfg->filter_sharpness = sps->filter_sharpness;
583 hw_cfg->segment_enable = ctx->ppss.pps->segment_enabled;
584 hw_cfg->segment_map_update = ctx->ppss.pps->sgm.map_modified;
585
586 ctx->ppss.pps->sgm.map_modified = 0;
587
588 for (i = 0; i < 4; i++) {
589 hw_cfg->lf_ref_delta[i] = sps->ref_delta[i];
590 hw_cfg->lf_mode_delta[i] = sps->mode_delta[i];
591 }
592
593 set_intra_pred_penalties(hw_cfg, qp);
594
595 memset(mpp_buffer_get_ptr(buffers->hw_prob_count_buf),
596 0, VP8_PROB_COUNT_BUF_SIZE);
597 mpp_buffer_sync_end(buffers->hw_prob_count_buf);
598
599 return MPP_OK;
600 }
601
602 static MPP_RET set_code_frame(void *hal)
603 {
604 HalVp8eCtx *ctx = (HalVp8eCtx *) hal;
605
606 vp8e_init_entropy(ctx);
607 set_segmentation(ctx);
608 set_filter(ctx);
609 set_frame_header(ctx);
610 set_new_frame(ctx);
611 vp8e_write_entropy_tables(ctx);
612
613 return MPP_OK;
614 }
615
616 static void reset_refpic(HalVp8eRefPic *refPic)
617 {
618 refPic->poc = -1;
619 refPic->i_frame = 0;
620 refPic->p_frame = 0;
621 refPic->show = 0;
622 refPic->ipf = 0;
623 refPic->arf = 0;
624 refPic->grf = 0;
625 refPic->search = 0;
626 }
627
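/* Build ref_pic_list[] so that slots 0/1/2 hold the last (ipf), golden (grf)
 * and altref (arf) reference pictures, excluding the current picture, then
 * clear the per-slot flags which are later used to mark reference updates. */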
628 static void init_ref_pic_list(HalVp8ePicBuf *picbuf)
629 {
630 RK_S32 i, j;
631
632 HalVp8eRefPic *ref_pic = picbuf->ref_pic;
633 HalVp8eRefPic *cur_pic = picbuf->cur_pic;
634 HalVp8eRefPic *ref_pic_list = picbuf->ref_pic_list;
635
636 j = 0;
637 for (i = 0; i < picbuf->size + 1; i++) {
638 if (ref_pic[i].ipf && (&ref_pic[i] != cur_pic)) {
639 ref_pic_list[j++] = ref_pic[i];
640 break;
641 }
642 }
643
644 for (i = 0; i < picbuf->size + 1; i++) {
645 if (ref_pic[i].grf && (&ref_pic[i] != cur_pic)) {
646 ref_pic_list[j++] = ref_pic[i];
647 break;
648 }
649 }
650
651 for (i = 0; i < picbuf->size + 1; i++) {
652 if (ref_pic[i].arf && (&ref_pic[i] != cur_pic)) {
653 ref_pic_list[j] = ref_pic[i];
654 break;
655 }
656 }
657
658 for (i = 0; i < picbuf->size; i++) {
659 ref_pic_list[i].ipf = 0;
660 ref_pic_list[i].grf = 0;
661 ref_pic_list[i].arf = 0;
662 }
663 }
664
665 static MPP_RET init_picbuf(void *hal)
666 {
667 RK_S32 i = 0;
668
669 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
670 HalVp8ePicBuf *buf = &ctx->picbuf;
671
672 if (buf->cur_pic->i_frame) {
673 buf->cur_pic->p_frame = 0;
674 buf->cur_pic->ipf = 1;
675 buf->cur_pic->grf = 1;
676 buf->cur_pic->arf = 1;
677
678 for (i = 0; i < 4; i++) {
679 if (&buf->ref_pic[i] != buf->cur_pic) {
680 reset_refpic(&buf->ref_pic[i]);
681 }
682 }
683 }
684
685 for (i = 0; i < 3; i++) {
686 reset_refpic(&buf->ref_pic_list[i]);
687 }
688
689 init_ref_pic_list(buf);
690
691 return MPP_OK;
692 }
693
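/*
 * Select the reference pictures for motion search: disable golden/altref use
 * when the picture buffer is too small, pick up to two searchable references
 * (mv_ref_idx[0/1]), program their luma/chroma addresses, and choose (or
 * disable) a reconstruction buffer for the current picture.
 */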
694 static MPP_RET set_picbuf_ref(void *hal)
695 {
696 RK_S32 i = 0;
697
698 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
699 HalVp8ePicBuf *pic_buf = &ctx->picbuf;
700 Vp8eHwCfg *hw_cfg = &ctx->hw_cfg;
701 HalVp8eRefPic *ref_pic_list = pic_buf->ref_pic_list;
702
703 {
704 RK_S32 no_grf = 0;
705 RK_S32 no_arf = 0;
706 if (pic_buf->size < 2) {
707 no_grf = 1;
708 pic_buf->cur_pic->grf = 0;
709 }
710 if (pic_buf->size < 3) {
711 no_arf = 1;
712 pic_buf->cur_pic->arf = 0;
713 }
714
715 for (i = 0; i < pic_buf->size; i++) {
716 if (pic_buf->cur_pic->grf || no_grf)
717 pic_buf->ref_pic_list[i].grf = 0;
718 if (pic_buf->cur_pic->arf || no_arf)
719 pic_buf->ref_pic_list[i].arf = 0;
720 }
721 }
722 {
723 RK_S32 ref_idx = -1;
724 RK_S32 ref_idx2 = -1;
725
726 for (i = 0; i < 3; i++) {
727 if ((i < pic_buf->size) && ref_pic_list[i].search) {
728 if (ref_idx == -1)
729 ref_idx = i;
730 else if (ref_idx2 == -1)
731 ref_idx2 = i;
732 else
733 ref_pic_list[i].search = 0;
734 } else {
735 ref_pic_list[i].search = 0;
736 }
737 }
738
739 if (ref_idx == -1)
740 ref_idx = 0;
741
742 hw_cfg->mv_ref_idx[0] = hw_cfg->mv_ref_idx[1] = ref_idx;
743
744 if (pic_buf->cur_pic->p_frame) {
745 pic_buf->ref_pic_list[ref_idx].search = 1;
746
747 hw_cfg->internal_img_lum_base_r[0] = ref_pic_list[ref_idx].picture.lum;
748 hw_cfg->internal_img_chr_base_r[0] = ref_pic_list[ref_idx].picture.cb;
749 hw_cfg->internal_img_lum_base_r[1] = ref_pic_list[ref_idx].picture.lum;
750 hw_cfg->internal_img_chr_base_r[1] = ref_pic_list[ref_idx].picture.cb;
751 hw_cfg->mv_ref_idx[0] = hw_cfg->mv_ref_idx[1] = ref_idx;
752 hw_cfg->ref2_enable = 0;
753
754 if (ref_idx2 != -1) {
755 hw_cfg->internal_img_lum_base_r[1] = ref_pic_list[ref_idx2].picture.lum;
756 hw_cfg->internal_img_chr_base_r[1] = ref_pic_list[ref_idx2].picture.cb;
757 hw_cfg->mv_ref_idx[1] = ref_idx2;
758 hw_cfg->ref2_enable = 1;
759 }
760 }
761 }
762 hw_cfg->rec_write_disable = 0;
763
764 if (!pic_buf->cur_pic->picture.lum) {
765 HalVp8eRefPic *cur_pic = pic_buf->cur_pic;
766 HalVp8eRefPic *cand;
767 RK_S32 recIdx = -1;
768
769 for (i = 0; i < pic_buf->size + 1; i++) {
770 cand = &pic_buf->ref_pic[i];
771 if (cand == cur_pic)
772 continue;
773 if (((cur_pic->ipf | cand->ipf) == cur_pic->ipf) &&
774 ((cur_pic->grf | cand->grf) == cur_pic->grf) &&
775 ((cur_pic->arf | cand->arf) == cur_pic->arf))
776 recIdx = i;
777 }
778
779 if (recIdx >= 0) {
780 cur_pic->picture.lum = pic_buf->ref_pic[recIdx].picture.lum;
781 pic_buf->ref_pic[recIdx].picture.lum = 0;
782 } else {
783 hw_cfg->rec_write_disable = 1;
784 }
785 }
786
787 hw_cfg->internal_img_lum_base_w = pic_buf->cur_pic->picture.lum;
788 hw_cfg->internal_img_chr_base_w = pic_buf->cur_pic->picture.cb;
789
790 return MPP_OK;
791 }
792
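/*
 * Fill the 32-byte IVF file header (little-endian):
 *   bytes  0-3  signature "DKIF"      bytes  4-5  version (0)
 *   bytes  6-7  header size (32)      bytes  8-11 FourCC "VP80"
 *   bytes 12-15 width, height         bytes 16-23 time base (num/denom)
 *   bytes 24-27 frame count           bytes 28-31 unused
 */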
793 void write_ivf_header(void *hal, RK_U8 *dst)
794 {
795 RK_U8 data[IVF_HDR_BYTES] = {0};
796
797 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
798
799 MppEncPrepCfg *prep = &ctx->cfg->prep;
800 MppEncRcCfg *rc = &ctx->cfg->rc;
801
802 data[0] = 'D';
803 data[1] = 'K';
804 data[2] = 'I';
805 data[3] = 'F';
806
807 data[6] = 32;
808
809 data[8] = 'V';
810 data[9] = 'P';
811 data[10] = '8';
812 data[11] = '0';
813
814 data[12] = prep->width & 0xff;
815 data[13] = (prep->width >> 8) & 0xff;
816 data[14] = prep->height & 0xff;
817 data[15] = (prep->height >> 8) & 0xff;
818
819 data[16] = rc->fps_out_num & 0xff;
820 data[17] = (rc->fps_out_num >> 8) & 0xff;
821 data[18] = (rc->fps_out_num >> 16) & 0xff;
822 data[19] = (rc->fps_out_num >> 24) & 0xff;
823
824 data[20] = rc->fps_out_denom & 0xff;
825 data[21] = (rc->fps_out_denom >> 8) & 0xff;
826 data[22] = (rc->fps_out_denom >> 16) & 0xff;
827 data[23] = (rc->fps_out_denom >> 24) & 0xff;
828
829 data[24] = ctx->frame_cnt & 0xff;
830 data[25] = (ctx->frame_cnt >> 8) & 0xff;
831 data[26] = (ctx->frame_cnt >> 16) & 0xff;
832 data[27] = (ctx->frame_cnt >> 24) & 0xff;
833
834 memcpy(dst, data, IVF_HDR_BYTES);
835 }
836
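/* Fill the 12-byte IVF frame header: 4-byte payload size followed by an
 * 8-byte presentation timestamp (the frame counter), little-endian. */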
837 static void write_ivf_frame(void *hal, RK_U8 *out)
838 {
839 RK_U8 data[IVF_FRM_BYTES];
840
841 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
842 RK_S32 byte_cnt = ctx->frame_size;
843
844 data[0] = byte_cnt & 0xff;
845 data[1] = (byte_cnt >> 8) & 0xff;
846 data[2] = (byte_cnt >> 16) & 0xff;
847 data[3] = (byte_cnt >> 24) & 0xff;
848
849 data[4] = ctx->frame_cnt & 0xff;
850 data[5] = (ctx->frame_cnt >> 8) & 0xff;
851 data[6] = (ctx->frame_cnt >> 16) & 0xff;
852 data[7] = (ctx->frame_cnt >> 24) & 0xff;
853 data[8] = (ctx->frame_cnt >> 32) & 0xff;
854 data[9] = (ctx->frame_cnt >> 40) & 0xff;
855 data[10] = (ctx->frame_cnt >> 48) & 0xff;
856 data[11] = (ctx->frame_cnt >> 56) & 0xff;
857
858 memcpy(out, data, IVF_FRM_BYTES);
859 }
860
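/*
 * Write the 3-byte VP8 frame tag (first partition size, show_frame, profile,
 * key-frame flag) and, for key frames only, the 0x9d012a start code plus the
 * 14-bit width/height with their 2-bit scaling fields. For inter frames the
 * function returns early; the MPP_NOK value is not treated as an error by
 * the caller.
 */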
861 static MPP_RET set_frame_tag(void *hal)
862 {
863 RK_S32 tmp = 0;
864 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
865
866 HalVp8ePicBuf *pic_buf = &ctx->picbuf;
867 HalVp8eRefPic *cur_pic = pic_buf->cur_pic;
868 Vp8ePutBitBuf *bitbuf = &ctx->bitbuf[0];
869 RK_S32 pic_height_in_pixel;
870 RK_S32 pic_width_in_pixel;
871 RK_S32 h_scaling;
872 RK_S32 v_scaling;
873
874 tmp = ((ctx->bitbuf[1].byte_cnt) << 5) |
875 ((cur_pic->show ? 1 : 0) << 4) |
876 (ctx->sps.profile << 1) |
877 (cur_pic->i_frame ? 0 : 1);
878
879 vp8e_put_byte(bitbuf, tmp & 0xff);
880
881 vp8e_put_byte(bitbuf, (tmp >> 8) & 0xff);
882
883 vp8e_put_byte(bitbuf, (tmp >> 16) & 0xff);
884
885 if (!cur_pic->i_frame)
886 return MPP_NOK;
887
888 vp8e_put_byte(bitbuf, 0x9d);
889 vp8e_put_byte(bitbuf, 0x01);
890 vp8e_put_byte(bitbuf, 0x2a);
891
892 if (ctx->hw_cfg.input_rotation) {
893 pic_height_in_pixel = ctx->sps.pic_width_in_pixel;
894 pic_width_in_pixel = ctx->sps.pic_height_in_pixel;
895 h_scaling = ctx->sps.vertical_scaling;
896 v_scaling = ctx->sps.horizontal_scaling;
897 } else {
898 pic_height_in_pixel = ctx->sps.pic_height_in_pixel;
899 pic_width_in_pixel = ctx->sps.pic_width_in_pixel;
900 h_scaling = ctx->sps.horizontal_scaling;
901 v_scaling = ctx->sps.vertical_scaling;
902 }
903
904 tmp = pic_width_in_pixel | (h_scaling << 14);
905 vp8e_put_byte(bitbuf, tmp & 0xff);
906 vp8e_put_byte(bitbuf, tmp >> 8);
907
908 tmp = pic_height_in_pixel | (v_scaling << 14);
909 vp8e_put_byte(bitbuf, tmp & 0xff);
910 vp8e_put_byte(bitbuf, tmp >> 8);
911
912 return MPP_OK;
913 }
914
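/* When more than one DCT partition is used, append the 3-byte little-endian
 * size of each partition except the last to the header partition. */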
915 static MPP_RET set_data_part_size(void *hal)
916 {
917 RK_S32 i = 0;
918 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
919
920 if (!ctx->sps.dct_partitions)
921 return MPP_NOK;
922
923 for (i = 2; i < ctx->sps.partition_cnt - 1; i++) {
924 Vp8ePutBitBuf *bitbuf = ctx->bitbuf;
925 RK_S32 tmp = bitbuf[i].data - bitbuf[i].p_data;
926 vp8e_put_byte(&bitbuf[1], tmp & 0xff);
927 vp8e_put_byte(&bitbuf[1], (tmp >> 8) & 0xff);
928 vp8e_put_byte(&bitbuf[1], (tmp >> 16) & 0xff);
929 }
930
931 return MPP_OK;
932 }
933
934 static MPP_RET update_picbuf(HalVp8ePicBuf *picbuf)
935 {
936 RK_S32 i, j;
937
938 HalVp8eRefPic *ref_pic_list = picbuf->ref_pic_list;
939 HalVp8eRefPic *ref_pic = picbuf->ref_pic;
940 HalVp8eRefPic *cur_pic = picbuf->cur_pic;
941
942 picbuf->last_pic = picbuf->cur_pic;
943
944 for (i = 0; i < picbuf->size + 1; i++) {
945 if (&ref_pic[i] == cur_pic)
946 continue;
947 if (cur_pic->ipf)
948 ref_pic[i].ipf = 0;
949 if (cur_pic->grf)
950 ref_pic[i].grf = 0;
951 if (cur_pic->arf)
952 ref_pic[i].arf = 0;
953 }
954
955 for (i = 0; i < picbuf->size; i++) {
956 for (j = 0; j < picbuf->size + 1; j++) {
957 if (ref_pic_list[i].grf)
958 ref_pic[j].grf = 0;
959 if (ref_pic_list[i].arf)
960 ref_pic[j].arf = 0;
961 }
962 }
963
964 for (i = 0; i < picbuf->size; i++) {
965 if (ref_pic_list[i].grf)
966 ref_pic_list[i].refPic->grf = 1;
967 if (ref_pic_list[i].arf)
968 ref_pic_list[i].refPic->arf = 1;
969 }
970
971 for (i = 0; i < picbuf->size + 1; i++) {
972 HalVp8eRefPic *tmp = &ref_pic[i];
973 if (!tmp->ipf && !tmp->arf && !tmp->grf) {
974 picbuf->cur_pic = &ref_pic[i];
975 break;
976 }
977 }
978
979 return MPP_OK;
980 }
981
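/*
 * Translate the prep config into encoder parameters: swap dimensions for
 * 90/270 degree rotation, derive macroblock counts and SPS defaults, compute
 * the input luma/chroma base addresses and offsets for the supported YUV and
 * RGB formats, set the right/bottom padding (x_fill/y_fill), select the
 * RGB-to-YUV conversion coefficients and resolve the VEPU input format.
 */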
982 static MPP_RET set_parameter(void *hal)
983 {
984 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
985
986 Vp8eSps *sps = &ctx->sps;
987 Vp8eHwCfg *hw_cfg = &ctx->hw_cfg;
988 MppEncPrepCfg *set = &ctx->cfg->prep;
989
990 RK_S32 width = set->width;
991 RK_S32 height = set->height;
992 RK_S32 width_align = MPP_ALIGN(set->width, 16);
993 RK_S32 height_align = MPP_ALIGN(set->height, 16);
994 RK_U32 stride;
995 RK_U32 rotation = 0;
996
997 // mirroring is not supported
998 if (set->mirroring)
999 mpp_err_f("Warning: mirroring is not supported\n");
1000
1001 if (set->rotation == MPP_ENC_ROT_90)
1002 rotation = 1;
1003 else if (set->rotation == MPP_ENC_ROT_270)
1004 rotation = 2;
1005 else if (set->rotation != MPP_ENC_ROT_0)
1006 mpp_err_f("Warning: only support 90 or 270 degree rotate, request rotate %d", rotation);
1007
1008 if (rotation) {
1009 MPP_SWAP(RK_S32, width, height);
1010 MPP_SWAP(RK_S32, width_align, height_align);
1011 }
1012
1013 stride = get_vepu_pixel_stride(&ctx->stride_cfg, width,
1014 set->hor_stride, set->format);
1015
1016 ctx->mb_per_frame = width_align / 16 * height_align / 16;
1017 ctx->mb_per_row = width_align / 16;
1018 ctx->mb_per_col = height_align / 16;
1019
1020 sps->pic_width_in_pixel = width_align;
1021 sps->pic_height_in_pixel = height_align;
1022 sps->pic_width_in_mbs = width_align / 16;
1023 sps->pic_height_in_mbs = height_align / 16;
1024 sps->horizontal_scaling = 0;
1025 sps->vertical_scaling = 0;
1026 sps->color_type = 0;
1027 sps->clamp_type = 0;
1028 sps->dct_partitions = 0;
1029 sps->partition_cnt = 2 + (1 << sps->dct_partitions);
1030 sps->profile = 1;
1031 sps->filter_type = 0;
1032 sps->filter_level = 0;
1033 sps->filter_sharpness = 0;
1034 sps->auto_filter_level = 1;
1035 sps->auto_filter_sharpness = 1;
1036 sps->quarter_pixel_mv = 1;
1037 sps->split_mv = 1;
1038 sps->refresh_entropy = 1;
1039 memset(sps->sing_bias, 0, sizeof(sps->sing_bias));
1040
1041 sps->filter_delta_enable = 1;
1042 memset(sps->ref_delta, 0, sizeof(sps->ref_delta));
1043 memset(sps->mode_delta, 0, sizeof(sps->mode_delta));
1044
1045 hw_cfg->input_rotation = rotation;
1046
1047 {
1048 RK_U32 tmp = 0;
1049 RK_U32 hor_offset_src = 0;
1050 RK_U32 ver_offset_src = 0;
1051 RK_U8 video_stab = 0;
1052
1053 if (set->format == MPP_FMT_YUV420SP || set->format == MPP_FMT_YUV420P) {
1054 tmp = ver_offset_src;
1055 tmp *= stride;
1056 tmp += hor_offset_src;
1057 hw_cfg->input_lum_base += (tmp & (~7));
1058 hw_cfg->input_luma_base_offset = tmp & 7;
1059
1060 if (video_stab)
1061 hw_cfg->vs_next_luma_base += (tmp & (~7));
1062
1063 if (set->format == MPP_FMT_YUV420P) {
1064 tmp = ver_offset_src / 2;
1065 tmp *= stride / 2;
1066 tmp += hor_offset_src / 2;
1067
1068 hw_cfg->input_cb_base += (tmp & (~7));
1069 hw_cfg->input_cr_base += (tmp & (~7));
1070 hw_cfg->input_chroma_base_offset = tmp & 7;
1071 } else {
1072 tmp = ver_offset_src / 2;
1073 tmp *= stride / 2;
1074 tmp += hor_offset_src / 2;
1075 tmp *= 2;
1076
1077 hw_cfg->input_cb_base += (tmp & (~7));
1078 hw_cfg->input_chroma_base_offset = tmp & 7;
1079 }
1080 } else if (set->format <= MPP_FMT_BGR444 && set->format >= MPP_FMT_RGB565) {
1081 tmp = ver_offset_src;
1082 tmp *= stride;
1083 tmp += hor_offset_src;
1084 tmp *= 2;
1085
1086 hw_cfg->input_lum_base += (tmp & (~7));
1087 hw_cfg->input_luma_base_offset = tmp & 7;
1088 hw_cfg->input_chroma_base_offset = (hw_cfg->input_luma_base_offset / 4) * 4;
1089
1090 if (video_stab)
1091 hw_cfg->vs_next_luma_base += (tmp & (~7));
1092 } else {
1093 tmp = ver_offset_src;
1094 tmp *= stride;
1095 tmp += hor_offset_src;
1096 tmp *= 4;
1097
1098 hw_cfg->input_lum_base += (tmp & (~7));
1099 hw_cfg->input_luma_base_offset = (tmp & 7) / 2;
1100
1101 if (video_stab)
1102 hw_cfg->vs_next_luma_base += (tmp & (~7));
1103 }
1104
1105 hw_cfg->mbs_in_row = width_align / 16;
1106 hw_cfg->mbs_in_col = height_align / 16;
1107 hw_cfg->pixels_on_row = stride;
1108 }
1109 if (width & 0x0F)
1110 hw_cfg->x_fill = (16 - (width & 0x0F)) / 4;
1111 else
1112 hw_cfg->x_fill = 0;
1113
1114 if (height & 0x0F)
1115 hw_cfg->y_fill = 16 - (height & 0x0F);
1116 else
1117 hw_cfg->y_fill = 0;
1118
1119 hw_cfg->vs_mode = 0;
1120
1121 switch (set->color) {
1122 case MPP_FRAME_SPC_RGB: /* BT.601 */
1123 default:
1124 /* Y = 0.2989 R + 0.5866 G + 0.1145 B
1125 * Cb = 0.5647 (B - Y) + 128
1126 * Cr = 0.7132 (R - Y) + 128
1127 */
1128 hw_cfg->rgb_coeff_a = 19589;
1129 hw_cfg->rgb_coeff_b = 38443;
1130 hw_cfg->rgb_coeff_c = 7504;
1131 hw_cfg->rgb_coeff_e = 37008;
1132 hw_cfg->rgb_coeff_f = 46740;
1133 break;
1134
1135 case MPP_FRAME_SPC_BT709: /* BT.709 */
1136 /* Y = 0.2126 R + 0.7152 G + 0.0722 B
1137 * Cb = 0.5389 (B - Y) + 128
1138 * Cr = 0.6350 (R - Y) + 128
1139 */
1140 hw_cfg->rgb_coeff_a = 13933;
1141 hw_cfg->rgb_coeff_b = 46871;
1142 hw_cfg->rgb_coeff_c = 732;
1143 hw_cfg->rgb_coeff_e = 35317;
1144 hw_cfg->rgb_coeff_f = 41615;
1145 break;
1146 }
1147
1148 hw_cfg->r_mask_msb = hw_cfg->g_mask_msb = hw_cfg->b_mask_msb = 0;
1149 VepuFormatCfg fmt_cfg;
1150 if (!get_vepu_fmt(&fmt_cfg, set->format)) {
1151 hw_cfg->input_format = fmt_cfg.format;
1152 hw_cfg->r_mask_msb = fmt_cfg.r_mask;
1153 hw_cfg->g_mask_msb = fmt_cfg.g_mask;
1154 hw_cfg->b_mask_msb = fmt_cfg.b_mask;
1155 } else
1156 return MPP_NOK;
1157
1158 return MPP_OK;
1159 }
1160
1161 static MPP_RET set_picbuf(void *hal)
1162 {
1163 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
1164
1165 // find one dpb for current picture
1166 {
1167 RK_U32 i = 0;
1168 RK_S32 width = ctx->sps.pic_width_in_mbs * 16;
1169 RK_S32 height = ctx->sps.pic_height_in_mbs * 16;
1170 HalVp8ePicBuf *picbuf = &ctx->picbuf;
1171
1172 memset(picbuf->ref_pic, 0, sizeof(picbuf->ref_pic));
1173 memset(picbuf->ref_pic_list, 0, sizeof(picbuf->ref_pic_list));
1174
1175 for (i = 0; i < REF_FRAME_COUNT + 1; i++) {
1176 picbuf->ref_pic[i].picture.lum_width = width;
1177 picbuf->ref_pic[i].picture.lum_height = height;
1178 picbuf->ref_pic[i].picture.ch_width = width / 2;
1179 picbuf->ref_pic[i].picture.ch_height = height / 2;
1180 picbuf->ref_pic[i].picture.lum = 0;
1181 picbuf->ref_pic[i].picture.cb = 0;
1182 }
1183
1184 picbuf->cur_pic = &picbuf->ref_pic[0];
1185 }
1186
1187 ctx->ppss.size = 1;
1188 ctx->ppss.store = (Pps *)mpp_calloc(Pps, 1);
1189 if (ctx->ppss.store == NULL) {
1190 mpp_err("failed to malloc ppss store.\n");
1191 goto __ERR_RET;
1192 }
1193
1194 ctx->ppss.pps = ctx->ppss.store;
1195 ctx->ppss.pps->segment_enabled = 0;
1196 ctx->ppss.pps->sgm.map_modified = 0;
1197
1198 return MPP_OK;
1199
1200 __ERR_RET:
1201 return MPP_NOK;
1202 }
1203
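/*
 * Allocate the hardware buffers: internal reconstruction luma/chroma, size
 * table, CABAC table, MV output, probability counters, segment map and the
 * output stream buffer, then reset the penalty values and ROI/intra areas
 * to their "unset" defaults.
 */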
1204 static MPP_RET alloc_buffer(void *hal)
1205 {
1206 MPP_RET ret = MPP_OK;
1207
1208 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
1209
1210 Vp8eHwCfg *hw_cfg = &ctx->hw_cfg;
1211 MppEncPrepCfg *pre = &ctx->cfg->prep;
1212 RK_U32 mb_total = ctx->mb_per_frame;
1213 Vp8eVpuBuf *buffers = (Vp8eVpuBuf *)ctx->buffers;
1214
1215 //set coding format as VP8
1216 hw_cfg->coding_type = 1;
1217
1218 ret = mpp_buffer_group_get_internal(&buffers->hw_buf_grp,
1219 MPP_BUFFER_TYPE_ION);
1220 if (ret) {
1221 mpp_err("buf group get failed ret %d\n", ret);
1222 goto __ERR_RET;
1223 }
1224
1225 //pad the allocation with an extra SZ_4K to avoid kernel crash
1226 ret = mpp_buffer_get(buffers->hw_buf_grp, &buffers->hw_luma_buf,
1227 MPP_ALIGN(mb_total * (16 * 16), SZ_4K) + SZ_4K);
1228 if (ret) {
1229 mpp_err("hw_luma_buf get failed ret %d\n", ret);
1230 goto __ERR_RET;
1231 }
1232 {
1233 RK_U32 i = 0;
1234 for (i = 0; i < 2; i++) {
1235 //pad the allocation with an extra SZ_4K to avoid kernel crash
1236 ret = mpp_buffer_get(buffers->hw_buf_grp, &buffers->hw_cbcr_buf[i],
1237 MPP_ALIGN(mb_total * (2 * 8 * 8), SZ_4K) + SZ_4K);
1238 if (ret) {
1239 mpp_err("hw_cbcr_buf[%d] get failed ret %d\n", i, ret);
1240 goto __ERR_RET;
1241 }
1242 }
1243 }
1244 hw_cfg->internal_img_lum_base_w = mpp_buffer_get_fd(buffers->hw_luma_buf);
1245 hw_cfg->internal_img_chr_base_w = mpp_buffer_get_fd(buffers->hw_cbcr_buf[0]);
1246
1247 hw_cfg->internal_img_lum_base_r[0] = mpp_buffer_get_fd(buffers->hw_luma_buf);
1248 hw_cfg->internal_img_chr_base_r[0] = mpp_buffer_get_fd(buffers->hw_cbcr_buf[1]);
1249 {
1250 /* NAL size table, table size must be 64-bit multiple,
1251 * space for SEI, MVC prefix, filler and zero at the end of table.
1252 * At least 1 macroblock row in every slice.
1253 * Also used for VP8 partitions. */
1254 RK_U32 size_tbl = MPP_ALIGN(sizeof(RK_U32) * (pre->height + 4), 8);
1255 ret = mpp_buffer_get(buffers->hw_buf_grp, &buffers->hw_size_table_buf, size_tbl);
1256 if (ret) {
1257 mpp_err("hw_size_table_buf get failed ret %d\n", ret);
1258 goto __ERR_RET;
1259 }
1260 }
1261 {
1262 RK_U32 cabac_tbl_size = 8 * 55 + 8 * 96;
1263 ret = mpp_buffer_get(buffers->hw_buf_grp, &buffers->hw_cabac_table_buf,
1264 cabac_tbl_size);
1265 if (ret) {
1266 mpp_err("hw_cabac_table_buf get failed\n");
1267 goto __ERR_RET;
1268 }
1269 }
1270 hw_cfg->cabac_tbl_base = mpp_buffer_get_fd(buffers->hw_cabac_table_buf);
1271
1272 ret = mpp_buffer_get(buffers->hw_buf_grp, &buffers->hw_mv_output_buf,
1273 mb_total * 4);
1274 if (ret) {
1275 mpp_err("hw_mv_output_buf get failed ret %d\n", ret);
1276 goto __ERR_RET;
1277 }
1278
1279 hw_cfg->mv_output_base = mpp_buffer_get_fd(buffers->hw_mv_output_buf);
1280
1281 memset(mpp_buffer_get_ptr(buffers->hw_mv_output_buf), 0, sizeof(RK_U32) * mb_total);
1282 mpp_buffer_sync_end(buffers->hw_mv_output_buf);
1283
1284 ret = mpp_buffer_get(buffers->hw_buf_grp, &buffers->hw_prob_count_buf, VP8_PROB_COUNT_BUF_SIZE);
1285 if (ret) {
1286 mpp_err("hw_prob_count_buf get failed ret %d\n", ret);
1287 goto __ERR_RET;
1288 }
1289
1290 hw_cfg->prob_count_base = mpp_buffer_get_fd(buffers->hw_prob_count_buf);
1291 {
1292 /* VP8: Segmentation map, 4 bits/mb, 64-bit multiple. */
1293 RK_U32 segment_map_size = (mb_total * 4 + 63) / 64 * 8;
1294
1295 ret = mpp_buffer_get(buffers->hw_buf_grp, &buffers->hw_segment_map_buf, segment_map_size);
1296 if (ret) {
1297 mpp_err("hw_segment_map_buf get failed ret %d\n", ret);
1298 goto __ERR_RET;
1299 }
1300
1301 hw_cfg->segment_map_base = mpp_buffer_get_fd(buffers->hw_segment_map_buf);
1302 memset(mpp_buffer_get_ptr(buffers->hw_segment_map_buf), 0, segment_map_size / 4);
1303 mpp_buffer_sync_end(buffers->hw_segment_map_buf);
1304 }
1305 {
1306 RK_U32 i = 0;
1307
1308 ctx->picbuf.size = 1;
1309 for (i = 0; i < 1; i++)
1310 ctx->picbuf.ref_pic[i].picture.lum = mpp_buffer_get_fd(buffers->hw_luma_buf);
1311 for (i = 0; i < 2; i++)
1312 ctx->picbuf.ref_pic[i].picture.cb = mpp_buffer_get_fd(buffers->hw_cbcr_buf[i]);
1313 }
1314 {
1315 RK_U32 pic_size = MPP_ALIGN(pre->width, 16) * MPP_ALIGN(pre->height, 16) * 3 / 2;
1316 RK_U32 out_size = pic_size / 2;
1317
1318 ret = mpp_buffer_get(buffers->hw_buf_grp, &buffers->hw_out_buf, out_size);
1319 if (ret) {
1320 mpp_err("hw_out_buf get failed ret %d\n", ret);
1321 goto __ERR_RET;
1322 }
1323 }
1324 ctx->regs = mpp_calloc(RK_U32, ctx->reg_size);
1325 if (!ctx->regs) {
1326 mpp_err("failed to calloc regs.\n");
1327 goto __ERR_RET;
1328 }
1329
1330 hw_cfg->intra_16_favor = -1;
1331 hw_cfg->prev_mode_favor = -1;
1332 hw_cfg->inter_favor = -1;
1333 hw_cfg->skip_penalty = -1;
1334 hw_cfg->diff_mv_penalty[0] = -1;
1335 hw_cfg->diff_mv_penalty[1] = -1;
1336 hw_cfg->diff_mv_penalty[2] = -1;
1337 hw_cfg->split_penalty[0] = 0;
1338 hw_cfg->split_penalty[1] = 0;
1339 hw_cfg->split_penalty[2] = 0x3FF;
1340 hw_cfg->split_penalty[3] = 0;
1341 hw_cfg->zero_mv_favor = 0;
1342
1343 hw_cfg->intra_area_top = hw_cfg->intra_area_bottom = 255;
1344 hw_cfg->intra_area_left = hw_cfg->intra_area_right = 255;
1345 hw_cfg->roi1_top = hw_cfg->roi1_bottom = 255;
1346 hw_cfg->roi1_left = hw_cfg->roi1_right = 255;
1347 hw_cfg->roi2_top = hw_cfg->roi2_bottom = 255;
1348 hw_cfg->roi2_left = hw_cfg->roi2_right = 255;
1349
1350 return ret;
1351
1352 __ERR_RET:
1353 if (buffers)
1354 hal_vp8e_buf_free(hal);
1355
1356 return ret;
1357 }
1358
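/*
 * Per-frame entry point before the hardware run: compute the input plane
 * offsets, split the output buffer into frame tag (bitbuf[0]), header
 * partition (bitbuf[1]) and DCT partition (bitbuf[2]) regions, mark the
 * reference usage for key/P frames, and run the frame setup chain
 * (set_frame_params / set_picbuf_ref / set_code_frame).
 */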
1359 MPP_RET hal_vp8e_enc_strm_code(void *hal, HalEncTask *task)
1360 {
1361 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
1362 Vp8eHwCfg *hw_cfg = &ctx->hw_cfg;
1363 VepuOffsetCfg hw_offset;
1364
1365 MppEncCfgSet *cfg = ctx->cfg;
1366 MppEncPrepCfg *prep = &cfg->prep;
1367
1368 {
1369 RK_U32 i = 0;
1370 for (i = 0; i < 9; i++) {
1371 ctx->p_out_buf[i] = NULL;
1372 ctx->stream_size[i] = 0;
1373 }
1374 }
1375
1376 {
1377 hw_offset.fmt = prep->format;
1378
1379 hw_offset.width = prep->width;
1380 hw_offset.height = prep->height;
1381 hw_offset.hor_stride = prep->hor_stride;
1382 hw_offset.ver_stride = prep->ver_stride;
1383 hw_offset.offset_x = mpp_frame_get_offset_x(task->frame);
1384 hw_offset.offset_y = mpp_frame_get_offset_y(task->frame);
1385
1386 get_vepu_offset_cfg(&hw_offset);
1387 }
1388
1389 {
1390 HalEncTask *enc_task = task;
1391
1392 hw_cfg->input_lum_base = mpp_buffer_get_fd(enc_task->input);
1393 hw_cfg->input_cb_base = hw_cfg->input_lum_base;
1394 hw_cfg->input_cr_base = hw_cfg->input_cb_base;
1395 hw_cfg->input_lum_offset = hw_offset.offset_byte[0];
1396 hw_cfg->input_cb_offset = hw_offset.offset_byte[1];
1397 hw_cfg->input_cr_offset = hw_offset.offset_byte[2];
1398 }
1399
1400 // split memory for vp8 partition
1401 {
1402 RK_S32 offset = 0;
1403 Vp8eVpuBuf *buffers = (Vp8eVpuBuf *)ctx->buffers;
1404 RK_U8 *p_end = NULL;
1405 RK_U32 buf_size = mpp_buffer_get_size(buffers->hw_out_buf);
1406 RK_U32 bus_addr = mpp_buffer_get_fd(buffers->hw_out_buf);
1407 RK_U8 *p_start = mpp_buffer_get_ptr(buffers->hw_out_buf);
1408
1409 p_end = p_start + 3;
1410 if (ctx->frame_type == VP8E_FRM_KEY)
1411 p_end += 7; // frame tag length: I frame 10 bytes, P frame 3 bytes.
1412 vp8e_set_buffer(&ctx->bitbuf[0], p_start, p_end - p_start);
1413
1414 offset = p_end - p_start;
1415 hw_cfg->output_strm_base = bus_addr;
1416 hw_cfg->output_strm_offset = offset;
1417
1418 p_start = p_end;
1419 p_end = p_start + buf_size / 10;
1420 p_end = (RK_U8 *)((size_t)p_end & ~0x7);
1421 vp8e_set_buffer(&ctx->bitbuf[1], p_start, p_end - p_start);
1422
1423 offset += p_end - p_start;
1424 hw_cfg->partition_Base[0] = bus_addr;
1425 hw_cfg->partition_offset[0] = offset;
1426
1427 p_start = p_end;
1428 p_end = mpp_buffer_get_ptr(buffers->hw_out_buf) + buf_size;
1429 p_end = (RK_U8 *)((size_t)p_end & ~0x7);
1430 vp8e_set_buffer(&ctx->bitbuf[2], p_start, p_end - p_start);
1431
1432 offset += p_end - p_start;
1433 hw_cfg->partition_Base[1] = bus_addr;
1434 hw_cfg->partition_offset[1] = offset;
1435 hw_cfg->output_strm_size = p_end - p_start;
1436
1437 p_start = p_end;
1438 }
1439
1440 {
1441 HalVp8ePicBuf *pic_buf = &ctx->picbuf;
1442
1443 pic_buf->cur_pic->show = 1;
1444 pic_buf->cur_pic->poc = ctx->frame_cnt;
1445 pic_buf->cur_pic->i_frame = (ctx->frame_type == VP8E_FRM_KEY);
1446
1447 init_picbuf(ctx);
1448
1449 if (ctx->frame_type == VP8E_FRM_P) {
1450 pic_buf->cur_pic->p_frame = 1;
1451 pic_buf->cur_pic->arf = 1;
1452 pic_buf->cur_pic->grf = 1;
1453 pic_buf->cur_pic->ipf = 1;
1454 pic_buf->ref_pic_list[0].search = 1;
1455 pic_buf->ref_pic_list[1].search = 1;
1456 pic_buf->ref_pic_list[2].search = 1;
1457 }
1458
1459 if (ctx->rc->frame_coded == 0)
1460 return MPP_OK;
1461
1462 if (ctx->rc->golden_picture_rate) {
1463 pic_buf->cur_pic->grf = 1;
1464 if (!pic_buf->cur_pic->arf)
1465 pic_buf->ref_pic_list[1].arf = 1;
1466 }
1467 }
1468 set_frame_params(ctx);
1469 set_picbuf_ref(ctx);
1470 set_code_frame(ctx);
1471
1472 return MPP_OK;
1473 }
1474
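/*
 * Build the quantizer tables for every QP index from the VP8 DC/AC lookup
 * tables: Y1 uses the values directly, Y2 doubles the DC value and scales AC
 * by 155/100 (minimum 8), chroma caps the DC value at 132. For each entry the
 * hardware quant, zero-bin, rounding and dequant values are derived.
 */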
1475 MPP_RET hal_vp8e_init_qp_table(void *hal)
1476 {
1477 RK_S32 i = 0, j = 0;
1478 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
1479
1480 for (i = 0; i < QINDEX_RANGE; i++) {
1481 RK_S32 tmp = 0;
1482 Vp8eQp * qp = &ctx->qp_y1[i];
1483
1484 for (j = 0; j < 2; j++) {
1485 if (j == 0) {
1486 tmp = dc_q_lookup_tbl[MPP_CLIP3(0, QINDEX_RANGE - 1, i)];
1487 } else {
1488 tmp = ac_q_lookup_tbl[MPP_CLIP3(0, QINDEX_RANGE - 1, i)];
1489 }
1490
1491 qp->quant[j] = MPP_MIN((1 << 16) / tmp, 0x3FFF);
1492 qp->zbin[j] = ((q_zbin_factors_tbl[i] * tmp) + 64) >> 7;
1493 qp->round[j] = (q_rounding_factors_tbl[i] * tmp) >> 7;
1494 qp->dequant[j] = tmp;
1495 }
1496
1497 qp = &ctx->qp_y2[i];
1498 for (j = 0; j < 2; j++) {
1499 if (j == 0) {
1500 tmp = dc_q_lookup_tbl[MPP_CLIP3(0, QINDEX_RANGE - 1, i)];
1501 tmp = tmp * 2;
1502 } else {
1503 tmp = ac_q_lookup_tbl[MPP_CLIP3(0, QINDEX_RANGE - 1, i)];
1504 tmp = (tmp * 155) / 100;
1505 if (tmp < 8)
1506 tmp = 8;
1507 }
1508 qp->quant[j] = MPP_MIN((1 << 16) / tmp, 0x3FFF);
1509 qp->zbin[j] = ((q_zbin_factors_tbl[i] * tmp) + 64) >> 7;
1510 qp->round[j] = (q_rounding_factors_tbl[i] * tmp) >> 7;
1511 qp->dequant[j] = tmp;
1512 }
1513
1514 qp = &ctx->qp_ch[i];
1515 for (j = 0; j < 2; j++) {
1516 if (j == 0) {
1517 tmp = dc_q_lookup_tbl[MPP_CLIP3(0, QINDEX_RANGE - 1, i)];
1518 if (tmp > 132)
1519 tmp = 132;
1520 } else {
1521 tmp = ac_q_lookup_tbl[MPP_CLIP3(0, QINDEX_RANGE - 1, i)];
1522 }
1523 qp->quant[j] = MPP_MIN((1 << 16) / tmp, 0x3FFF);
1524 qp->zbin[j] = ((q_zbin_factors_tbl[i] * tmp) + 64) >> 7;
1525 qp->round[j] = (q_rounding_factors_tbl[i] * tmp) >> 7;
1526 qp->dequant[j] = tmp;
1527 }
1528 }
1529
1530 return MPP_OK;
1531 }
1532
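/*
 * Post-processing after the hardware run: read the partition sizes from the
 * size table to update the byte counts, write the frame tag and the DCT
 * partition sizes, then copy the (optional) IVF frame header, the frame tag
 * plus header partition and the DCT partitions into the task output buffer.
 */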
1533 MPP_RET hal_vp8e_update_buffers(void *hal, HalEncTask *task)
1534 {
1535 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
1536 Vp8eVpuBuf *buffers = (Vp8eVpuBuf *)ctx->buffers;
1537 const RK_U32 hw_offset = ctx->hw_cfg.first_free_bit / 8;
1538 RK_U32 *part = (RK_U32 *)mpp_buffer_get_ptr(buffers->hw_size_table_buf);
1539
1540 mpp_buffer_sync_begin(buffers->hw_size_table_buf);
1541
1542 ctx->bitbuf[1].byte_cnt += part[0] - hw_offset;
1543 ctx->bitbuf[1].data += part[0] - hw_offset;
1544
1545 ctx->bitbuf[2].byte_cnt = part[1];
1546 ctx->bitbuf[2].data += part[1];
1547 ctx->bitbuf[3].byte_cnt = part[2];
1548 ctx->bitbuf[3].data += part[2];
1549
1550 set_frame_tag(ctx);
1551
1552 if (vp8e_buffer_gap(&ctx->bitbuf[1], 4) == MPP_OK) {
1553 set_data_part_size(ctx);
1554 }
1555
1556 ctx->prev_frame_lost = 0;
1557
1558 ctx->p_out_buf[0] = (RK_U32 *)ctx->bitbuf[0].p_data;
1559 ctx->p_out_buf[1] = (RK_U32 *)ctx->bitbuf[2].p_data;
1560 if (ctx->sps.dct_partitions)
1561 ctx->p_out_buf[2] = (RK_U32 *)ctx->bitbuf[3].p_data;
1562
1563 ctx->stream_size[0] = ctx->bitbuf[0].byte_cnt +
1564 ctx->bitbuf[1].byte_cnt;
1565 ctx->stream_size[1] = ctx->bitbuf[2].byte_cnt;
1566
1567 if (ctx->sps.dct_partitions)
1568 ctx->stream_size[2] = ctx->bitbuf[3].byte_cnt;
1569
1570 ctx->frame_size = ctx->stream_size[0] + ctx->stream_size[1] +
1571 ctx->stream_size[2];
1572
1573 update_picbuf(&ctx->picbuf);
1574 {
1575 HalEncTask *enc_task = task;
1576 RK_U8 *p_out = mpp_buffer_get_ptr(enc_task->output);
1577 RK_S32 disable_ivf = ctx->cfg->vp8.disable_ivf;
1578
1579 mpp_buffer_sync_begin(buffers->hw_size_table_buf);
1580
1581 if (!disable_ivf) {
1582 p_out += enc_task->length;
1583
1584 if (ctx->frame_size) {
1585 write_ivf_frame(ctx, p_out);
1586
1587 p_out += IVF_FRM_BYTES;
1588 enc_task->length += IVF_FRM_BYTES;
1589 }
1590 }
1591
1592 memcpy(p_out, ctx->p_out_buf[0], ctx->stream_size[0]);
1593 p_out += ctx->stream_size[0];
1594 enc_task->length += ctx->stream_size[0];
1595
1596 memcpy(p_out, ctx->p_out_buf[1], ctx->stream_size[1]);
1597 p_out += ctx->stream_size[1];
1598 enc_task->length += ctx->stream_size[1];
1599
1600 memcpy(p_out, ctx->p_out_buf[2], ctx->stream_size[2]);
1601 p_out += ctx->stream_size[2];
1602 enc_task->length += ctx->stream_size[2];
1603
1604 mpp_buffer_sync_end(buffers->hw_size_table_buf);
1605 }
1606 return MPP_OK;
1607 }
1608
1609 MPP_RET hal_vp8e_setup(void *hal)
1610 {
1611 MPP_RET ret = MPP_OK;
1612 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
1613
1614 if (set_parameter(ctx)) {
1615 mpp_err("set vp8e parameter failed");
1616 return MPP_NOK;
1617 }
1618
1619 if (set_picbuf(ctx)) {
1620 mpp_err("set vp8e picbuf failed, no enough memory");
1621 return MPP_ERR_NOMEM;
1622 }
1623
1624 ret = alloc_buffer(ctx);
1625
1626 return ret;
1627 }
1628
1629 MPP_RET hal_vp8e_buf_free(void *hal)
1630 {
1631 HalVp8eCtx *ctx = (HalVp8eCtx *)hal;
1632 Vp8eVpuBuf *buffers = (Vp8eVpuBuf *)ctx->buffers;
1633
1634 if (buffers->hw_luma_buf) {
1635 mpp_buffer_put(buffers->hw_luma_buf);
1636 buffers->hw_luma_buf = NULL;
1637 }
1638
1639 {
1640 RK_U32 i = 0;
1641 for (i = 0; i < 2; i++) {
1642 if (buffers->hw_cbcr_buf[i]) {
1643 mpp_buffer_put(buffers->hw_cbcr_buf[i]);
1644 buffers->hw_cbcr_buf[i] = NULL;
1645 }
1646 }
1647 }
1648
1649 if (buffers->hw_size_table_buf) {
1650 mpp_buffer_put(buffers->hw_size_table_buf);
1651 buffers->hw_size_table_buf = NULL;
1652 }
1653
1654 if (buffers->hw_cabac_table_buf) {
1655 mpp_buffer_put(buffers->hw_cabac_table_buf);
1656 buffers->hw_cabac_table_buf = NULL;
1657 }
1658
1659 if (buffers->hw_mv_output_buf) {
1660 mpp_buffer_put(buffers->hw_mv_output_buf);
1661 buffers->hw_mv_output_buf = NULL;
1662 }
1663
1664 if (buffers->hw_prob_count_buf) {
1665 mpp_buffer_put(buffers->hw_prob_count_buf);
1666 buffers->hw_prob_count_buf = NULL;
1667 }
1668
1669 if (buffers->hw_segment_map_buf) {
1670 mpp_buffer_put(buffers->hw_segment_map_buf);
1671 buffers->hw_segment_map_buf = NULL;
1672 }
1673
1674 if (buffers->hw_out_buf) {
1675 mpp_buffer_put(buffers->hw_out_buf);
1676 buffers->hw_out_buf = NULL;
1677 }
1678
1679 if (buffers->hw_buf_grp) {
1680 mpp_buffer_group_put(buffers->hw_buf_grp);
1681 buffers->hw_buf_grp = NULL;
1682 }
1683
1684 return MPP_OK;
1685 }
1686