1 /*
2 * Copyright 2015 Rockchip Electronics Co. LTD
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define MODULE_TAG "utils"
18
19 #include <ctype.h>
20 #include <limits.h>
21 #include <string.h>
22
23 #include "mpp_mem.h"
24 #include "mpp_log.h"
25 #include "mpp_lock.h"
26 #include "mpp_time.h"
27 #include "mpp_common.h"
28 #include "utils.h"
29
30 #define MAX_HALF_WORD_SUM_CNT \
31 ((RK_ULONG)((0-1) / ((1UL << ((__SIZEOF_POINTER__ * 8) / 2)) - 1)))
32 #define CAL_BYTE (__SIZEOF_POINTER__ >> 1)
33
void _show_options(int count, OptionInfo *options)
{
    OptionInfo *opt = options;
    OptionInfo *end = options + count;

    /* print usage line for every option that has a name */
    for (; opt < end; opt++) {
        if (opt->name)
            mpp_log("-%s %-16s\t%s\n", opt->name, opt->argname, opt->help);
    }
}
45
/*
 * Dump one frame to a raw file for offline inspection.
 *
 * Planes are written line by line so stride padding is stripped.
 * Semi-planar chroma (YUV422SP / YUV444SP) is de-interleaved to planar
 * layout for easier viewing; 10bit 420SP is unpacked to 16bit per pixel.
 *
 * frame - source frame, must carry a valid MppBuffer
 * fp    - destination file opened for binary writing
 *
 * Fixes vs previous version:
 * - check mpp_malloc result before use in YUV422SP / YUV444SP paths
 * - drop unused scratch allocations in YUV400 and RGB paths
 */
void dump_mpp_frame_to_file(MppFrame frame, FILE *fp)
{
    RK_U32 width = 0;
    RK_U32 height = 0;
    RK_U32 h_stride = 0;
    RK_U32 v_stride = 0;
    MppFrameFormat fmt = MPP_FMT_YUV420SP;
    MppBuffer buffer = NULL;
    RK_U8 *base = NULL;

    if (NULL == fp || NULL == frame)
        return ;

    width    = mpp_frame_get_width(frame);
    height   = mpp_frame_get_height(frame);
    h_stride = mpp_frame_get_hor_stride(frame);
    v_stride = mpp_frame_get_ver_stride(frame);
    fmt      = mpp_frame_get_fmt(frame);
    buffer   = mpp_frame_get_buffer(frame);

    if (NULL == buffer)
        return ;

    base = (RK_U8 *)mpp_buffer_get_ptr(buffer);

    /* little-endian RGB is dumped as its base format */
    if (MPP_FRAME_FMT_IS_RGB(fmt) && MPP_FRAME_FMT_IS_LE(fmt)) {
        fmt &= MPP_FRAME_FMT_MASK;
    }
    switch (fmt & MPP_FRAME_FMT_MASK) {
    case MPP_FMT_YUV422SP : {
        /* YUV422SP -> YUV422P for better display */
        RK_U32 i, j;
        RK_U8 *base_y = base;
        RK_U8 *base_c = base + h_stride * v_stride;
        RK_U8 *tmp = mpp_malloc(RK_U8, h_stride * height * 2);
        RK_U8 *tmp_u = NULL;
        RK_U8 *tmp_v = NULL;

        if (NULL == tmp) {
            mpp_err("dump yuv422sp malloc failed\n");
            return;
        }
        tmp_u = tmp;
        tmp_v = tmp + width * height / 2;

        for (i = 0; i < height; i++, base_y += h_stride)
            fwrite(base_y, 1, width, fp);

        /* split interleaved UV into separate U / V planes */
        for (i = 0; i < height; i++, base_c += h_stride) {
            for (j = 0; j < width / 2; j++) {
                tmp_u[j] = base_c[2 * j + 0];
                tmp_v[j] = base_c[2 * j + 1];
            }
            tmp_u += width / 2;
            tmp_v += width / 2;
        }

        fwrite(tmp, 1, width * height, fp);
        mpp_free(tmp);
    } break;
    case MPP_FMT_YUV420SP_VU :
    case MPP_FMT_YUV420SP : {
        RK_U32 i;
        RK_U8 *base_y = base;
        RK_U8 *base_c = base + h_stride * v_stride;

        for (i = 0; i < height; i++, base_y += h_stride) {
            fwrite(base_y, 1, width, fp);
        }
        for (i = 0; i < height / 2; i++, base_c += h_stride) {
            fwrite(base_c, 1, width, fp);
        }
    } break;
    case MPP_FMT_YUV420P : {
        RK_U32 i;
        RK_U8 *base_y = base;
        RK_U8 *base_c = base + h_stride * v_stride;

        for (i = 0; i < height; i++, base_y += h_stride) {
            fwrite(base_y, 1, width, fp);
        }
        /* U plane then V plane, both at quarter size with half stride */
        for (i = 0; i < height / 2; i++, base_c += h_stride / 2) {
            fwrite(base_c, 1, width / 2, fp);
        }
        for (i = 0; i < height / 2; i++, base_c += h_stride / 2) {
            fwrite(base_c, 1, width / 2, fp);
        }
    } break;
    case MPP_FMT_YUV420SP_10BIT : {
        RK_U32 i, k;
        RK_U8 *base_y = base;
        RK_U8 *base_c = base + h_stride * v_stride;
        RK_U8 *tmp_line = (RK_U8 *)mpp_malloc(RK_U16, width);

        if (!tmp_line) {
            mpp_log("tmp_line malloc fail");
            return;
        }

        /* unpack 8 packed 10bit pixels (10 bytes) to 8 x RK_U16 */
        for (i = 0; i < height; i++, base_y += h_stride) {
            for (k = 0; k < width / 8; k++) {
                RK_U16 *pix = (RK_U16 *)(tmp_line + k * 16);
                RK_U16 *base_u16 = (RK_U16 *)(base_y + k * 10);

                pix[0] = base_u16[0] & 0x03FF;
                pix[1] = (base_u16[0] & 0xFC00) >> 10 | (base_u16[1] & 0x000F) << 6;
                pix[2] = (base_u16[1] & 0x3FF0) >> 4;
                pix[3] = (base_u16[1] & 0xC000) >> 14 | (base_u16[2] & 0x00FF) << 2;
                pix[4] = (base_u16[2] & 0xFF00) >> 8 | (base_u16[3] & 0x0003) << 8;
                pix[5] = (base_u16[3] & 0x0FFC) >> 2;
                pix[6] = (base_u16[3] & 0xF000) >> 12 | (base_u16[4] & 0x003F) << 4;
                pix[7] = (base_u16[4] & 0xFFC0) >> 6;
            }
            fwrite(tmp_line, width * sizeof(RK_U16) , 1, fp);
        }

        for (i = 0; i < height / 2; i++, base_c += h_stride) {
            for (k = 0; k < (width / 8); k++) {
                RK_U16 *pix = (RK_U16 *)(tmp_line + k * 16);
                RK_U16 *base_u16 = (RK_U16 *)(base_c + k * 10);

                pix[0] = base_u16[0] & 0x03FF;
                pix[1] = (base_u16[0] & 0xFC00) >> 10 | (base_u16[1] & 0x000F) << 6;
                pix[2] = (base_u16[1] & 0x3FF0) >> 4;
                pix[3] = (base_u16[1] & 0xC000) >> 14 | (base_u16[2] & 0x00FF) << 2;
                pix[4] = (base_u16[2] & 0xFF00) >> 8 | (base_u16[3] & 0x0003) << 8;
                pix[5] = (base_u16[3] & 0x0FFC) >> 2;
                pix[6] = (base_u16[3] & 0xF000) >> 12 | (base_u16[4] & 0x003F) << 4;
                pix[7] = (base_u16[4] & 0xFFC0) >> 6;
            }
            fwrite(tmp_line, width * sizeof(RK_U16) , 1, fp);
        }

        MPP_FREE(tmp_line);
    } break;
    case MPP_FMT_YUV444SP : {
        /* YUV444SP -> YUV444P for better display */
        RK_U32 i, j;
        RK_U8 *base_y = base;
        RK_U8 *base_c = base + h_stride * v_stride;
        RK_U8 *tmp = mpp_malloc(RK_U8, h_stride * height * 2);
        RK_U8 *tmp_u = NULL;
        RK_U8 *tmp_v = NULL;

        if (NULL == tmp) {
            mpp_err("dump yuv444sp malloc failed\n");
            return;
        }
        tmp_u = tmp;
        tmp_v = tmp + width * height;

        for (i = 0; i < height; i++, base_y += h_stride)
            fwrite(base_y, 1, width, fp);

        for (i = 0; i < height; i++, base_c += h_stride * 2) {
            for (j = 0; j < width; j++) {
                tmp_u[j] = base_c[2 * j + 0];
                tmp_v[j] = base_c[2 * j + 1];
            }
            tmp_u += width;
            tmp_v += width;
        }

        fwrite(tmp, 1, width * height * 2, fp);
        mpp_free(tmp);
    } break;
    case MPP_FMT_YUV400: {
        RK_U32 i;
        RK_U8 *base_y = base;

        /* luma only, no scratch buffer needed */
        for (i = 0; i < height; i++, base_y += h_stride)
            fwrite(base_y, 1, width, fp);
    } break;
    case MPP_FMT_ARGB8888:
    case MPP_FMT_ABGR8888:
    case MPP_FMT_BGRA8888:
    case MPP_FMT_RGBA8888: {
        RK_U32 i;
        RK_U8 *base_y = base;

        /* 4 bytes per pixel, written row by row */
        for (i = 0; i < height; i++, base_y += h_stride)
            fwrite(base_y, 1, width * 4, fp);
    } break;
    case MPP_FMT_RGB565:
    case MPP_FMT_BGR565:
    case MPP_FMT_RGB555:
    case MPP_FMT_BGR555:
    case MPP_FMT_RGB444:
    case MPP_FMT_BGR444: {
        RK_U32 i;
        RK_U8 *base_y = base;

        /* 2 bytes per pixel, written row by row */
        for (i = 0; i < height; i++, base_y += h_stride)
            fwrite(base_y, 1, width * 2, fp);
    } break;
    default : {
        mpp_err("not supported format %d\n", fmt);
    } break;
    }
}
241
/*
 * Accumulate the byte sum of a buffer into *sum using wide loads.
 * Whole CAL_BYTE-sized units are summed as half-pointer-width integers,
 * the remaining tail bytes are summed individually.
 */
void wide_bit_sum(RK_U8 *data, RK_U32 len, RK_ULONG *sum)
{
#if LONG_MAX == INT_MAX
    RK_U16 *wide = (RK_U16 *)data;   /* 32-bit target: sum 16-bit units */
#else
    RK_U32 *wide = (RK_U32 *)data;   /* 64-bit target: sum 32-bit units */
#endif
    RK_U32 whole = len / CAL_BYTE;
    RK_U32 i;

    for (i = 0; i < whole; i++)
        *sum += wide[i];

    /* tail bytes that do not fill a whole unit */
    for (i = whole * CAL_BYTE; i < len; i++)
        *sum += data[i];
}
264
/*
 * Fill a DataCrc with a checksum of a byte buffer:
 * - crc->sum[]: one wide-bit sum per group of data_grp_byte_cnt bytes
 *   (group size chosen so a group's sum cannot overflow RK_ULONG)
 * - crc->vor:   32-bit XOR of the whole buffer, tail padded with zeros
 * - crc->len:   input length in bytes
 *
 * NOTE(review): wide_bit_sum accumulates with "+=", so crc->sum[] is
 * assumed to be zeroed by the caller before this call — confirm.
 */
void calc_data_crc(RK_U8 *dat, RK_U32 len, DataCrc *crc)
{
    RK_ULONG data_grp_byte_cnt = MAX_HALF_WORD_SUM_CNT * CAL_BYTE;
    RK_U32 i = 0, grp_loop = 0;
    RK_U8 *dat8 = NULL;
    RK_U32 *dat32 = NULL;
    RK_U32 xor = 0;

    /*calc sum */
    /* number of groups, rounding up for the partial tail group */
    crc->sum_cnt = (len + data_grp_byte_cnt - 1) / data_grp_byte_cnt;
    for (grp_loop = 0; grp_loop < len / data_grp_byte_cnt; grp_loop++) {
        wide_bit_sum(&dat[grp_loop * data_grp_byte_cnt], data_grp_byte_cnt, &crc->sum[grp_loop]);
    }
    /* partial last group, if any (grp_loop already points at it) */
    if (len % data_grp_byte_cnt) {
        wide_bit_sum(&dat[grp_loop * data_grp_byte_cnt], len % data_grp_byte_cnt, &crc->sum[grp_loop]);
    }

    /*calc xor */
    dat32 = (RK_U32 *)dat;
    for (i = 0; i < len / 4; i++)
        xor ^= dat32[i];

    /* fold the trailing 1..3 bytes into a zero-padded 32-bit word */
    if (len % 4) {
        RK_U32 val = 0;
        dat8 = (RK_U8 *)&val;
        for (i = (len / 4) * 4; i < len; i++)
            dat8[i % 4] = dat[i];
        xor ^= val;
    }

    crc->len = len;
    crc->vor = xor;
}
298
/* Write one DataCrc record as a text line: len, group sums, xor. */
void write_data_crc(FILE *fp, DataCrc *crc)
{
    RK_U32 i;

    if (NULL == fp)
        return;

    fprintf(fp, "%08d,", crc->len);
    for (i = 0; i < crc->sum_cnt; i++)
        fprintf(fp, " %lx,", crc->sum[i]);
    fprintf(fp, " %08x\n", crc->vor);
    fflush(fp);
}
312
/*
 * Read one DataCrc record written by write_data_crc.
 * crc->sum_cnt must already hold the expected group count.
 */
void read_data_crc(FILE *fp, DataCrc *crc)
{
    RK_U32 i;
    RK_S32 ret;

    if (NULL == fp)
        return;

    ret = fscanf(fp, "%8d", &crc->len);
    for (i = 0; i < crc->sum_cnt; i++)
        ret |= fscanf(fp, "%lx", &crc->sum[i]);
    ret |= fscanf(fp, "%08x", &crc->vor);

    /* any EOF return poisons ret to EOF via the OR accumulation */
    if (EOF == ret)
        mpp_err_f("unexpected EOF found\n");
}
328
/*
 * Fill a FrmCrc with per-plane checksums of a frame.
 * Lines are grouped so each group's wide-bit sum fits in RK_ULONG;
 * the XOR accumulator runs over whole 32-bit words of each line.
 *
 * NOTE(review): chroma is taken as one plane of height/2 lines at the
 * luma stride starting at buf + height * stride — this matches a
 * semi-planar 4:2:0 layout with ver_stride == height; confirm callers.
 * NOTE(review): wide_bit_sum accumulates with "+=", so crc sums are
 * assumed zeroed by the caller before this call — confirm.
 */
void calc_frm_crc(MppFrame frame, FrmCrc *crc)
{
    RK_ULONG data_grp_byte_cnt = MAX_HALF_WORD_SUM_CNT * CAL_BYTE;
    RK_U32 grp_line_cnt = 0;
    RK_U32 grp_cnt = 0;

    RK_U32 y = 0, x = 0;
    RK_U8 *dat8 = NULL;
    RK_U32 *dat32 = NULL;
    RK_U32 xor = 0;

    RK_U32 width = mpp_frame_get_width(frame);
    RK_U32 height = mpp_frame_get_height(frame);
    RK_U32 stride = mpp_frame_get_hor_stride(frame);
    RK_U8 *buf = (RK_U8 *)mpp_buffer_get_ptr(mpp_frame_get_buffer(frame));

    /* lines per sum group, width rounded up to a whole CAL_BYTE unit */
    grp_line_cnt = data_grp_byte_cnt / ((width + CAL_BYTE - 1) / CAL_BYTE * CAL_BYTE);

    /* luma */
    grp_cnt = (height + grp_line_cnt - 1) / grp_line_cnt;
    crc->luma.sum_cnt = grp_cnt;

    dat8 = buf;
    for (y = 0; y < height / grp_line_cnt * grp_line_cnt; y++) {
        wide_bit_sum(&dat8[y * stride], width, &crc->luma.sum[y / grp_line_cnt]);
    }
    /* partial last group of lines, if any */
    if (height % grp_line_cnt) {
        for (y = height / grp_line_cnt * grp_line_cnt; y < height; y++) {
            wide_bit_sum(&dat8[y * stride], width, &crc->luma.sum[y / grp_line_cnt]);
        }
    }

    /* XOR over whole 32-bit words of each luma line (tail bytes ignored) */
    dat8 = buf;
    for (y = 0; y < height; y++) {
        dat32 = (RK_U32 *)&dat8[y * stride];
        for (x = 0; x < width / 4; x++)
            xor ^= dat32[x];
    }
    crc->luma.len = height * width;
    crc->luma.vor = xor;

    /* chroma */
    grp_cnt = (height / 2 + grp_line_cnt - 1) / grp_line_cnt;
    crc->chroma.sum_cnt = grp_cnt;

    dat8 = buf + height * stride;
    for (y = 0; y < height / 2 / grp_line_cnt * grp_line_cnt; y++) {
        wide_bit_sum(&dat8[y * stride], width, &crc->chroma.sum[y / grp_line_cnt]);
    }
    if (height / 2 % grp_line_cnt) {
        for (y = height / 2 / grp_line_cnt * grp_line_cnt; y < height / 2; y++) {
            wide_bit_sum(&dat8[y * stride], width, &crc->chroma.sum[y / grp_line_cnt]);
        }
    }

    /* chroma XOR continues from the luma accumulator (xor not reset) */
    dat8 = buf + height * stride;
    for (y = 0; y < height / 2; y++) {
        dat32 = (RK_U32 *)&dat8[y * stride];
        for (x = 0; x < width / 4; x++)
            xor ^= dat32[x];
    }
    crc->chroma.len = height * width / 2;
    crc->chroma.vor = xor;
}
393
/* Write one FrmCrc record as a text line: luma then chroma fields. */
void write_frm_crc(FILE *fp, FrmCrc *crc)
{
    RK_U32 i;

    if (NULL == fp)
        return;

    /* luma: length, group sums, xor */
    fprintf(fp, "%d,", crc->luma.len);
    for (i = 0; i < crc->luma.sum_cnt; i++)
        fprintf(fp, " %lx,", crc->luma.sum[i]);
    fprintf(fp, " %08x,", crc->luma.vor);

    /* chroma: length, group sums, xor */
    fprintf(fp, " %d,", crc->chroma.len);
    for (i = 0; i < crc->chroma.sum_cnt; i++)
        fprintf(fp, " %lx,", crc->chroma.sum[i]);
    fprintf(fp, " %08x\n", crc->chroma.vor);

    fflush(fp);
}
416
/*
 * Read one FrmCrc record written by write_frm_crc.
 * sum_cnt of both planes must already hold the expected group counts.
 */
void read_frm_crc(FILE *fp, FrmCrc *crc)
{
    RK_U32 i;
    RK_S32 ret;

    if (NULL == fp)
        return;

    /* luma */
    ret = fscanf(fp, "%d", &crc->luma.len);
    for (i = 0; i < crc->luma.sum_cnt; i++)
        ret |= fscanf(fp, "%lx", &crc->luma.sum[i]);
    ret |= fscanf(fp, "%08x", &crc->luma.vor);

    /* chroma */
    ret |= fscanf(fp, "%d", &crc->chroma.len);
    for (i = 0; i < crc->chroma.sum_cnt; i++)
        ret |= fscanf(fp, "%lx", &crc->chroma.sum[i]);
    ret |= fscanf(fp, "%08x", &crc->chroma.vor);

    /* any EOF return poisons ret to EOF via the OR accumulation */
    if (EOF == ret)
        mpp_err_f("unexpected EOF found\n");
}
440
read_with_pixel_width(RK_U8 * buf,RK_S32 width,RK_S32 height,RK_S32 hor_stride,RK_S32 pix_w,FILE * fp)441 static MPP_RET read_with_pixel_width(RK_U8 *buf, RK_S32 width, RK_S32 height,
442 RK_S32 hor_stride, RK_S32 pix_w, FILE *fp)
443 {
444 RK_S32 row;
445 MPP_RET ret = MPP_OK;
446
447 if (hor_stride < width * pix_w) {
448 mpp_err_f("invalid %dbit color config: hor_stride %d is smaller then width %d multiply by 4\n",
449 8 * pix_w, hor_stride, width, pix_w);
450 mpp_err_f("width should be defined by pixel count\n");
451 mpp_err_f("stride should be defined by byte count\n");
452
453 hor_stride = width * pix_w;
454 }
455
456 for (row = 0; row < height; row++) {
457 RK_S32 read_size = fread(buf + row * hor_stride, 1, width * pix_w, fp);
458 if (feof(fp)) {
459 ret = MPP_NOK;
460 break;
461 }
462 if (read_size != width * pix_w) {
463 mpp_err_f("read file failed expect %d vs %d\n",
464 width * pix_w, read_size);
465 ret = MPP_NOK;
466 }
467 }
468
469 return ret;
470 }
471
/*
 * Read one raw frame from fp into buf according to fmt.
 *
 * For FBC formats the compressed header and payload are read whole;
 * for linear formats each plane is read row by row at the given
 * strides. Returns MPP_OK on success, MPP_NOK / MPP_ERR_VALUE on
 * short read, EOF or unsupported format.
 */
MPP_RET read_image(RK_U8 *buf, FILE *fp, RK_U32 width, RK_U32 height,
                   RK_U32 hor_stride, RK_U32 ver_stride, MppFrameFormat fmt)
{
    MPP_RET ret = MPP_OK;
    RK_U32 read_size;
    RK_U32 row = 0;
    RK_U8 *buf_y = buf;
    RK_U8 *buf_u = buf_y + hor_stride * ver_stride; // NOTE: diff from gen_yuv_image
    RK_U8 *buf_v = buf_u + hor_stride * ver_stride / 4; // NOTE: diff from gen_yuv_image

    if (MPP_FRAME_FMT_IS_FBC(fmt)) {
        RK_U32 align_w = MPP_ALIGN(width, 16);
        RK_U32 align_h = MPP_ALIGN(height, 16);
        RK_U32 header_size = 0;

        /* AFBC v1 aligns the header block to 4K, later versions do not */
        if ((fmt & MPP_FRAME_FBC_MASK) == MPP_FRAME_FBC_AFBC_V1)
            header_size = MPP_ALIGN(align_w * align_h / 16, SZ_4K);
        else
            header_size = align_w * align_h / 16;

        /* read fbc header first */
        read_size = fread(buf, 1, header_size, fp);
        if (read_size != header_size) {
            mpp_err_f("read fbc file header failed %d vs %d\n",
                      read_size, header_size);
            ret = MPP_NOK;
            goto err;
        }
        buf += header_size;

        /* then the payload, sized by the 16-aligned geometry */
        switch (fmt & MPP_FRAME_FMT_MASK) {
        case MPP_FMT_YUV420SP : {
            read_size = fread(buf, 1, align_w * align_h * 3 / 2, fp);
            if (read_size != align_w * align_h * 3 / 2) {
                mpp_err_f("read 420sp fbc file payload failed %d vs %d\n",
                          read_size, align_w * align_h * 3 / 2);
                ret = MPP_NOK;
                goto err;
            }
        } break;
        case MPP_FMT_YUV422SP :
        case MPP_FMT_YUV422_YUYV :
        case MPP_FMT_YUV422_YVYU :
        case MPP_FMT_YUV422_UYVY :
        case MPP_FMT_YUV422_VYUY : {
            read_size = fread(buf, 1, align_w * align_h * 2, fp);
            if (read_size != align_w * align_h * 2) {
                mpp_err_f("read 422sp fbc file payload failed %d vs %d\n",
                          read_size, align_w * align_h * 2);
                ret = MPP_NOK;
                goto err;
            }
        } break;
        default : {
            mpp_err_f("not supported fbc format %x\n", fmt);
        } break;
        }

        return MPP_OK;
    }

    switch (fmt & MPP_FRAME_FMT_MASK) {
    case MPP_FMT_YUV420SP : {
        /* luma plane, then interleaved UV at half height */
        for (row = 0; row < height; row++) {
            read_size = fread(buf_y + row * hor_stride, 1, width, fp);
            if (read_size != width) {
                ret = MPP_NOK;
                goto err;
            }
        }

        for (row = 0; row < height / 2; row++) {
            read_size = fread(buf_u + row * hor_stride, 1, width, fp);
            if (read_size != width) {
                ret = MPP_NOK;
                goto err;
            }
        }
    } break;
    case MPP_FMT_YUV420P : {
        /* luma plane, then U and V planes at quarter size */
        for (row = 0; row < height; row++) {
            read_size = fread(buf_y + row * hor_stride, 1, width, fp);
            if (read_size != width) {
                ret = MPP_NOK;
                goto err;
            }
        }

        for (row = 0; row < height / 2; row++) {
            read_size = fread(buf_u + row * hor_stride / 2, 1, width / 2, fp);
            if (read_size != width / 2) {
                ret = MPP_NOK;
                goto err;
            }
        }

        for (row = 0; row < height / 2; row++) {
            read_size = fread(buf_v + row * hor_stride / 2, 1, width / 2, fp);
            if (read_size != width / 2) {
                ret = MPP_NOK;
                goto err;
            }
        }
    } break;
    case MPP_FMT_ARGB8888 :
    case MPP_FMT_ABGR8888 :
    case MPP_FMT_BGRA8888 :
    case MPP_FMT_RGBA8888 :
    case MPP_FMT_RGB101010 :
    case MPP_FMT_BGR101010 : {
        /* packed 4 bytes per pixel */
        ret = read_with_pixel_width(buf_y, width, height, hor_stride, 4, fp);
    } break;
    case MPP_FMT_YUV422P :
    case MPP_FMT_YUV422SP :
    case MPP_FMT_BGR444 :
    case MPP_FMT_RGB444 :
    case MPP_FMT_RGB555 :
    case MPP_FMT_BGR555 :
    case MPP_FMT_RGB565 :
    case MPP_FMT_BGR565 :
    case MPP_FMT_YUV422_YUYV :
    case MPP_FMT_YUV422_YVYU :
    case MPP_FMT_YUV422_UYVY :
    case MPP_FMT_YUV422_VYUY : {
        /* packed 2 bytes per pixel (422 planar read as packed rows) */
        ret = read_with_pixel_width(buf_y, width, height, hor_stride, 2, fp);
    } break;
    case MPP_FMT_YUV444SP :
    case MPP_FMT_YUV444P :
    case MPP_FMT_RGB888 :
    case MPP_FMT_BGR888 : {
        /* packed 3 bytes per pixel */
        ret = read_with_pixel_width(buf_y, width, height, hor_stride, 3, fp);
    } break;
    default : {
        mpp_err_f("read image do not support fmt %d\n", fmt);
        ret = MPP_ERR_VALUE;
    } break;
    }

err:

    return ret;
}
614
static void fill_MPP_FMT_YUV420SP(RK_U8 *buf, RK_U32 width, RK_U32 height,
                                  RK_U32 hor_stride, RK_U32 ver_stride,
                                  RK_U32 frame_count)
{
    // MPP_FMT_YUV420SP = ffmpeg: nv12
    // https://www.fourcc.org/pixel-format/yuv-nv12/
    RK_U8 *row = buf;
    RK_U32 x, y;

    /* luma: moving diagonal gradient */
    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++)
            row[x] = x + y + frame_count * 3;
        row += hor_stride;
    }

    /* interleaved UV plane at half resolution */
    row = buf + hor_stride * ver_stride;
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            row[x * 2 + 0] = 128 + y + frame_count * 2;
            row[x * 2 + 1] = 64 + x + frame_count * 5;
        }
        row += hor_stride;
    }
}
638
static void fill_MPP_FMT_YUV422SP(RK_U8 *buf, RK_U32 width, RK_U32 height,
                                  RK_U32 hor_stride, RK_U32 ver_stride,
                                  RK_U32 frame_count)
{
    // MPP_FMT_YUV422SP = ffmpeg: nv16
    // not valid in www.fourcc.org
    RK_U8 *row = buf;
    RK_U32 x, y;

    /* luma: moving diagonal gradient */
    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++)
            row[x] = x + y + frame_count * 3;
        row += hor_stride;
    }

    /* interleaved UV plane, full height, half horizontal resolution */
    row = buf + hor_stride * ver_stride;
    for (y = 0; y < height; y++) {
        for (x = 0; x < width / 2; x++) {
            row[x * 2 + 0] = 128 + y / 2 + frame_count * 2;
            row[x * 2 + 1] = 64 + x + frame_count * 5;
        }
        row += hor_stride;
    }
}
662
/*
 * Produce the RGB test color for pixel (x, y) of frame frm_cnt.
 * Frames 0/1/2 are solid red/green/blue; later frames are a moving
 * color bar generated in YUV and converted to clipped RGB.
 */
static void get_rgb_color(RK_U32 *R, RK_U32 *G, RK_U32 *B, RK_S32 x, RK_S32 y, RK_S32 frm_cnt)
{
    switch (frm_cnt) {
    case 0 : {
        /* frame 0 -> red */
        R[0] = 0xff;
        G[0] = 0;
        B[0] = 0;
    } return;
    case 1 : {
        /* frame 1 -> green */
        R[0] = 0;
        G[0] = 0xff;
        B[0] = 0;
    } return;
    case 2 : {
        /* frame 2 -> blue */
        R[0] = 0;
        G[0] = 0;
        B[0] = 0xff;
    } return;
    default : break;
    }

    /* moving color bar: synthesize YUV then convert to RGB */
    RK_U8 Y = (0 + x + y + frm_cnt * 3);
    RK_U8 U = (128 + (y / 2) + frm_cnt * 2);
    RK_U8 V = (64 + (x / 2) + frm_cnt * 5);

    RK_S32 r = Y + ((360 * (V - 128)) >> 8);
    RK_S32 g = Y - (((88 * (U - 128) + 184 * (V - 128))) >> 8);
    RK_S32 b = Y + ((455 * (U - 128)) >> 8);

    R[0] = MPP_CLIP3(0, 255, r);
    G[0] = MPP_CLIP3(0, 255, g);
    B[0] = MPP_CLIP3(0, 255, b);
}
702
static void fill_MPP_FMT_RGB565(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_RGB565 = ffmpeg: rgb565be
    // 16-bit pixel MSB -> LSB: (rrrr,rggg,gggb,bbbb)
    RK_U16 val = (((R >> 3) & 0x1f) << 11) |
                 (((G >> 2) & 0x3f) << 5) |
                 (((B >> 3) & 0x1f) << 0);
    RK_U8 hi = (val >> 8) & 0xff;
    RK_U8 lo = (val >> 0) & 0xff;

    /* big endian stores the high byte first */
    p[0] = be ? hi : lo;
    p[1] = be ? lo : hi;
}
721
static void fill_MPP_FMT_BGR565(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_BGR565 = ffmpeg: bgr565be
    // 16-bit pixel MSB -> LSB: (bbbb,bggg,gggr,rrrr)
    RK_U16 val = (((R >> 3) & 0x1f) << 0) |
                 (((G >> 2) & 0x3f) << 5) |
                 (((B >> 3) & 0x1f) << 11);
    RK_U8 hi = (val >> 8) & 0xff;
    RK_U8 lo = (val >> 0) & 0xff;

    /* big endian stores the high byte first */
    p[0] = be ? hi : lo;
    p[1] = be ? lo : hi;
}
740
static void fill_MPP_FMT_RGB555(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_RGB555 = ffmpeg: rgb555be
    // 16-bit pixel MSB -> LSB: (0rrr,rrgg,gggb,bbbb)
    RK_U16 val = (((R >> 3) & 0x1f) << 10) |
                 (((G >> 3) & 0x1f) << 5) |
                 (((B >> 3) & 0x1f) << 0);
    RK_U8 hi = (val >> 8) & 0xff;
    RK_U8 lo = (val >> 0) & 0xff;

    /* big endian stores the high byte first */
    p[0] = be ? hi : lo;
    p[1] = be ? lo : hi;
}
759
static void fill_MPP_FMT_BGR555(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_BGR555 = ffmpeg: bgr555be
    // 16-bit pixel MSB -> LSB: (0bbb,bbgg,gggr,rrrr)
    RK_U16 val = (((R >> 3) & 0x1f) << 0) |
                 (((G >> 3) & 0x1f) << 5) |
                 (((B >> 3) & 0x1f) << 10);
    RK_U8 hi = (val >> 8) & 0xff;
    RK_U8 lo = (val >> 0) & 0xff;

    /* big endian stores the high byte first */
    p[0] = be ? hi : lo;
    p[1] = be ? lo : hi;
}
778
static void fill_MPP_FMT_RGB444(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_RGB444 = ffmpeg: rgb444be
    // 16-bit pixel MSB -> LSB: (0000,rrrr,gggg,bbbb)
    RK_U16 val = (((R >> 4) & 0xf) << 8) |
                 (((G >> 4) & 0xf) << 4) |
                 (((B >> 4) & 0xf) << 0);
    RK_U8 hi = (val >> 8) & 0xff;
    RK_U8 lo = (val >> 0) & 0xff;

    /* big endian stores the high byte first */
    p[0] = be ? hi : lo;
    p[1] = be ? lo : hi;
}
797
static void fill_MPP_FMT_BGR444(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_BGR444 = ffmpeg: bgr444be
    // 16-bit pixel MSB -> LSB: (0000,bbbb,gggg,rrrr)
    RK_U16 val = (((R >> 4) & 0xf) << 0) |
                 (((G >> 4) & 0xf) << 4) |
                 (((B >> 4) & 0xf) << 8);
    RK_U8 hi = (val >> 8) & 0xff;
    RK_U8 lo = (val >> 0) & 0xff;

    /* big endian stores the high byte first */
    p[0] = be ? hi : lo;
    p[1] = be ? lo : hi;
}
816
static void fill_MPP_FMT_RGB888(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_RGB888
    // 24-bit pixel MSB -> LSB: (rrrr,rrrr,gggg,gggg,bbbb,bbbb)
    // big endian writes R first, little endian writes B first
    p[0] = be ? R : B;
    p[1] = G;
    p[2] = be ? B : R;
}
834
static void fill_MPP_FMT_BGR888(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_BGR888
    // 24-bit pixel MSB -> LSB: (bbbb,bbbb,gggg,gggg,rrrr,rrrr)
    // big endian writes B first, little endian writes R first
    p[0] = be ? B : R;
    p[1] = G;
    p[2] = be ? R : B;
}
852
static void fill_MPP_FMT_RGB101010(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_RGB101010
    // 32-bit pixel MSB -> LSB: (00rr,rrrr,rrrr,gggg,gggg,ggbb,bbbb,bbbb)
    // 8-bit inputs are scaled by 4 to occupy 10 bits per channel
    RK_U32 val = (((R * 4) & 0x3ff) << 20) |
                 (((G * 4) & 0x3ff) << 10) |
                 (((B * 4) & 0x3ff) << 0);
    RK_U32 i;

    /* big endian stores the high byte first, little endian the reverse */
    for (i = 0; i < 4; i++)
        p[i] = (val >> (be ? (24 - 8 * i) : (8 * i))) & 0xff;
}
875
static void fill_MPP_FMT_BGR101010(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_BGR101010
    // 32-bit pixel MSB -> LSB: (00bb,bbbb,bbbb,gggg,gggg,ggrr,rrrr,rrrr)
    // 8-bit inputs are scaled by 4 to occupy 10 bits per channel
    RK_U32 val = (((R * 4) & 0x3ff) << 0) |
                 (((G * 4) & 0x3ff) << 10) |
                 (((B * 4) & 0x3ff) << 20);
    RK_U32 i;

    /* big endian stores the high byte first, little endian the reverse */
    for (i = 0; i < 4; i++)
        p[i] = (val >> (be ? (24 - 8 * i) : (8 * i))) & 0xff;
}
898
static void fill_MPP_FMT_ARGB8888(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_ARGB8888
    // 32-bit pixel MSB -> LSB: (XXXX,XXXX,rrrr,rrrr,gggg,gggg,bbbb,bbbb)
    // alpha is forced to 0xff
    const RK_U8 msb_first[4] = { 0xff, (RK_U8)R, (RK_U8)G, (RK_U8)B };
    RK_U32 i;

    /* little endian writes the bytes in reverse order */
    for (i = 0; i < 4; i++)
        p[i] = be ? msb_first[i] : msb_first[3 - i];
}
918
static void fill_MPP_FMT_ABGR8888(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_ABGR8888
    // 32-bit pixel MSB -> LSB: (XXXX,XXXX,bbbb,bbbb,gggg,gggg,rrrr,rrrr)
    // alpha is forced to 0xff
    const RK_U8 msb_first[4] = { 0xff, (RK_U8)B, (RK_U8)G, (RK_U8)R };
    RK_U32 i;

    /* little endian writes the bytes in reverse order */
    for (i = 0; i < 4; i++)
        p[i] = be ? msb_first[i] : msb_first[3 - i];
}
938
static void fill_MPP_FMT_BGRA8888(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_BGRA8888
    // 32-bit pixel MSB -> LSB: (bbbb,bbbb,gggg,gggg,rrrr,rrrr,XXXX,XXXX)
    // alpha is forced to 0xff
    const RK_U8 msb_first[4] = { (RK_U8)B, (RK_U8)G, (RK_U8)R, 0xff };
    RK_U32 i;

    /* little endian writes the bytes in reverse order */
    for (i = 0; i < 4; i++)
        p[i] = be ? msb_first[i] : msb_first[3 - i];
}
958
static void fill_MPP_FMT_RGBA8888(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_RGBA8888
    // 32-bit pixel MSB -> LSB: (rrrr,rrrr,gggg,gggg,bbbb,bbbb,XXXX,XXXX)
    // alpha is forced to 0xff
    const RK_U8 msb_first[4] = { (RK_U8)R, (RK_U8)G, (RK_U8)B, 0xff };
    RK_U32 i;

    /* little endian writes the bytes in reverse order */
    for (i = 0; i < 4; i++)
        p[i] = be ? msb_first[i] : msb_first[3 - i];
}
978
/* Signature shared by all RGB pixel packers above: write one pixel of
 * (R, G, B) to p, big-endian byte order when be is non-zero. */
typedef void (*FillRgbFunc)(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be);

/*
 * Dispatch table of RGB packers.
 * NOTE(review): the entry order appears to mirror the MPP RGB format
 * enum order so callers can index by (fmt - first RGB fmt) — confirm
 * against the fmt-to-index computation at the call site before
 * reordering or inserting entries.
 */
FillRgbFunc fill_rgb_funcs[] = {
    fill_MPP_FMT_RGB565,
    fill_MPP_FMT_BGR565,
    fill_MPP_FMT_RGB555,
    fill_MPP_FMT_BGR555,
    fill_MPP_FMT_RGB444,
    fill_MPP_FMT_BGR444,
    fill_MPP_FMT_RGB888,
    fill_MPP_FMT_BGR888,
    fill_MPP_FMT_RGB101010,
    fill_MPP_FMT_BGR101010,
    fill_MPP_FMT_ARGB8888,
    fill_MPP_FMT_ABGR8888,
    fill_MPP_FMT_BGRA8888,
    fill_MPP_FMT_RGBA8888,
};
997
/*
 * Detect a stride given in pixels instead of bytes.
 *
 * If the workaround is not yet active and the byte stride is smaller
 * than one row of pixels, warn once and return 1 so the caller can
 * multiply the stride by the pixel size.
 *
 * Fix: corrected "smarller" / "mutiple" typos in the warning logs.
 */
static RK_S32 util_check_stride_by_pixel(RK_S32 workaround, RK_S32 width,
                                         RK_S32 hor_stride, RK_S32 pixel_size)
{
    if (!workaround && hor_stride < width * pixel_size) {
        mpp_log("warning: stride by bytes %d is smaller than width %d multiplied by pixel size %d\n",
                hor_stride, width, pixel_size);
        mpp_log("multiply stride %d by pixel size %d and set new byte stride to %d\n",
                hor_stride, pixel_size, hor_stride * pixel_size);
        workaround = 1;
    }

    return workaround;
}
1011
/*
 * Detect a horizontal stride that is not pixel_aign-pixel aligned for
 * the given pixel size. Warns once and returns 1 when the workaround
 * should be enabled; otherwise returns the incoming workaround flag.
 */
static RK_S32 util_check_8_pixel_aligned(RK_S32 workaround, RK_S32 hor_stride,
                                         RK_S32 pixel_aign, RK_S32 pixel_size,
                                         const char *fmt_name)
{
    RK_S32 aligned = MPP_ALIGN(hor_stride, pixel_aign * pixel_size);

    if (workaround || hor_stride == aligned)
        return workaround;

    mpp_log("warning: vepu only support 8 aligned horizontal stride in pixel for %s with pixel size %d\n",
            fmt_name, pixel_size);
    mpp_log("set byte stride to %d to match the requirement\n", aligned);

    return 1;
}
1026
/*
 * Fill a raw frame buffer with a synthetic test pattern for the given format.
 *
 * buf         - destination buffer, large enough for all planes at the given strides
 * width/height        - visible picture size in pixels
 * hor_stride/ver_stride - buffer strides; hor_stride is in bytes for most paths
 *                         (see the RGB cases for a pixel-stride workaround)
 * fmt         - MppFrameFormat; only the low format bits are dispatched on
 * frame_count - frame index, mixed into the pattern so consecutive frames differ
 *
 * Returns MPP_OK, or MPP_NOK for unsupported formats.
 */
MPP_RET fill_image(RK_U8 *buf, RK_U32 width, RK_U32 height,
                   RK_U32 hor_stride, RK_U32 ver_stride, MppFrameFormat fmt,
                   RK_U32 frame_count)
{
    MPP_RET ret = MPP_OK;
    RK_U8 *buf_y = buf;
    /* chroma plane(s) start right after the luma plane */
    RK_U8 *buf_c = buf + hor_stride * ver_stride;
    RK_U32 x, y, i;
    /* sticky workaround flags shared across calls: once a stride quirk is
     * detected it stays in effect for all subsequent frames */
    static RK_S32 is_pixel_stride = 0;
    static RK_S32 not_8_pixel = 0;

    switch (fmt & MPP_FRAME_FMT_MASK) {
    case MPP_FMT_YUV420SP : {
        fill_MPP_FMT_YUV420SP(buf, width, height, hor_stride, ver_stride, frame_count);
    } break;
    case MPP_FMT_YUV422SP : {
        fill_MPP_FMT_YUV422SP(buf, width, height, hor_stride, ver_stride, frame_count);
    } break;
    case MPP_FMT_YUV420P : {
        /* planar: Y plane, then U and V planes at quarter size each */
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = x + y + frame_count * 3;
            }
        }

        p = buf_c;
        for (y = 0; y < height / 2; y++, p += hor_stride / 2) {
            for (x = 0; x < width / 2; x++) {
                p[x] = 128 + y + frame_count * 2;
            }
        }

        p = buf_c + hor_stride * ver_stride / 4;
        for (y = 0; y < height / 2; y++, p += hor_stride / 2) {
            for (x = 0; x < width / 2; x++) {
                p[x] = 64 + x + frame_count * 5;
            }
        }
    } break;
    case MPP_FMT_YUV420SP_VU : {
        /* semi-planar with V before U in the interleaved chroma plane */
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = x + y + frame_count * 3;
            }
        }

        p = buf_c;
        for (y = 0; y < height / 2; y++, p += hor_stride) {
            for (x = 0; x < width / 2; x++) {
                p[x * 2 + 1] = 128 + y + frame_count * 2;
                p[x * 2 + 0] = 64 + x + frame_count * 5;
            }
        }
    } break;
    case MPP_FMT_YUV422P : {
        /* planar 4:2:2: chroma planes are half width but full height */
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = x + y + frame_count * 3;
            }
        }

        p = buf_c;
        for (y = 0; y < height; y++, p += hor_stride / 2) {
            for (x = 0; x < width / 2; x++) {
                p[x] = 128 + y / 2 + frame_count * 2;
            }
        }

        p = buf_c + hor_stride * ver_stride / 2;
        for (y = 0; y < height; y++, p += hor_stride / 2) {
            for (x = 0; x < width / 2; x++) {
                p[x] = 64 + x + frame_count * 5;
            }
        }
    } break;
    case MPP_FMT_YUV422SP_VU : {
        /* semi-planar 4:2:2 with V before U */
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = x + y + frame_count * 3;
            }
        }

        p = buf_c;
        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width / 2; x++) {
                p[x * 2 + 1] = 128 + y / 2 + frame_count * 2;
                p[x * 2 + 0] = 64 + x + frame_count * 5;
            }
        }
    } break;
    /* packed 4:2:2 variants below differ only in byte order (Y/U/V positions
     * inside each 4-byte pair of pixels) */
    case MPP_FMT_YUV422_YUYV : {
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width / 2; x++) {
                p[x * 4 + 0] = x * 2 + 0 + y + frame_count * 3;
                p[x * 4 + 2] = x * 2 + 1 + y + frame_count * 3;
                p[x * 4 + 1] = 128 + y / 2 + frame_count * 2;
                p[x * 4 + 3] = 64 + x + frame_count * 5;
            }
        }
    } break;
    case MPP_FMT_YUV422_YVYU : {
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width / 2; x++) {
                p[x * 4 + 0] = x * 2 + 0 + y + frame_count * 3;
                p[x * 4 + 2] = x * 2 + 1 + y + frame_count * 3;
                p[x * 4 + 3] = 128 + y / 2 + frame_count * 2;
                p[x * 4 + 1] = 64 + x + frame_count * 5;
            }
        }
    } break;
    case MPP_FMT_YUV422_UYVY : {
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width / 2; x++) {
                p[x * 4 + 1] = x * 2 + 0 + y + frame_count * 3;
                p[x * 4 + 3] = x * 2 + 1 + y + frame_count * 3;
                p[x * 4 + 0] = 128 + y / 2 + frame_count * 2;
                p[x * 4 + 2] = 64 + x + frame_count * 5;
            }
        }
    } break;
    case MPP_FMT_YUV422_VYUY : {
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width / 2; x++) {
                p[x * 4 + 1] = x * 2 + 0 + y + frame_count * 3;
                p[x * 4 + 3] = x * 2 + 1 + y + frame_count * 3;
                p[x * 4 + 2] = 128 + y / 2 + frame_count * 2;
                p[x * 4 + 0] = 64 + x + frame_count * 5;
            }
        }
    } break;
    case MPP_FMT_YUV400 : {
        /* luma-only */
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = x + y + frame_count * 3;
            }
        }
    } break;
    case MPP_FMT_YUV444SP : {
        /* full-resolution interleaved UV plane: two bytes per pixel */
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = x + y + frame_count * 3;
            }
        }

        p = buf + hor_stride * ver_stride;
        for (y = 0; y < height; y++, p += hor_stride * 2) {
            for (x = 0; x < width; x++) {
                p[x * 2 + 0] = 128 + y / 2 + frame_count * 2;
                p[x * 2 + 1] = 64 + x + frame_count * 5;
            }
        }
    } break;
    case MPP_FMT_YUV444P : {
        /* three full-resolution planes */
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = x + y + frame_count * 3;
            }
        }
        p = buf + hor_stride * ver_stride;
        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = 128 + y / 2 + frame_count * 2;
            }
        }
        p = buf + hor_stride * ver_stride * 2;
        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = 64 + x + frame_count * 5;
            }
        }
    } break;
    case MPP_FMT_RGB565 :
    case MPP_FMT_BGR565 :
    case MPP_FMT_RGB555 :
    case MPP_FMT_BGR555 :
    case MPP_FMT_RGB444 :
    case MPP_FMT_BGR444 : {
        RK_U8 *p = buf_y;
        RK_U32 pix_w = 2;   /* 16-bit RGB: 2 bytes per pixel */
        FillRgbFunc fill = fill_rgb_funcs[fmt - MPP_FRAME_FMT_RGB];

        /* callers may pass hor_stride in pixels; convert to bytes once */
        if (util_check_stride_by_pixel(is_pixel_stride, width, hor_stride, pix_w)) {
            hor_stride *= pix_w;
            is_pixel_stride = 1;
        }

        /* vepu needs 8-pixel alignment: 8 * 2 = 16 bytes */
        if (util_check_8_pixel_aligned(not_8_pixel, hor_stride,
                                       8, pix_w, "16bit RGB")) {
            hor_stride = MPP_ALIGN(hor_stride, 16);
            not_8_pixel = 1;
        }

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0, i = 0; x < width; x++, i += pix_w) {
                RK_U32 R, G, B;

                get_rgb_color(&R, &G, &B, x, y, frame_count);
                fill(p + i, R, G, B, MPP_FRAME_FMT_IS_BE(fmt));
            }
        }
    } break;
    case MPP_FMT_RGB101010 :
    case MPP_FMT_BGR101010 :
    case MPP_FMT_ARGB8888 :
    case MPP_FMT_ABGR8888 :
    case MPP_FMT_BGRA8888 :
    case MPP_FMT_RGBA8888 : {
        RK_U8 *p = buf_y;
        RK_U32 pix_w = 4;   /* 32-bit RGB: 4 bytes per pixel */
        FillRgbFunc fill = fill_rgb_funcs[fmt - MPP_FRAME_FMT_RGB];

        if (util_check_stride_by_pixel(is_pixel_stride, width, hor_stride, pix_w)) {
            hor_stride *= pix_w;
            is_pixel_stride = 1;
        }

        /* 8 pixels * 4 bytes = 32-byte alignment */
        if (util_check_8_pixel_aligned(not_8_pixel, hor_stride,
                                       8, pix_w, "32bit RGB")) {
            hor_stride = MPP_ALIGN(hor_stride, 32);
            not_8_pixel = 1;
        }

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0, i = 0; x < width; x++, i += pix_w) {
                RK_U32 R, G, B;

                get_rgb_color(&R, &G, &B, x, y, frame_count);
                fill(p + i, R, G, B, MPP_FRAME_FMT_IS_BE(fmt));
            }
        }
    } break;
    case MPP_FMT_BGR888 :
    case MPP_FMT_RGB888 : {
        RK_U8 *p = buf_y;
        RK_U32 pix_w = 3;   /* 24-bit RGB: 3 bytes per pixel */
        FillRgbFunc fill = fill_rgb_funcs[fmt - MPP_FRAME_FMT_RGB];

        if (util_check_stride_by_pixel(is_pixel_stride, width, hor_stride, pix_w)) {
            hor_stride *= pix_w;
            is_pixel_stride = 1;
        }

        /* 8 pixels * 3 bytes = 24-byte alignment */
        if (util_check_8_pixel_aligned(not_8_pixel, hor_stride,
                                       8, pix_w, "24bit RGB")) {
            hor_stride = MPP_ALIGN(hor_stride, 24);
            not_8_pixel = 1;
        }

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0, i = 0; x < width; x++, i += pix_w) {
                RK_U32 R, G, B;

                get_rgb_color(&R, &G, &B, x, y, frame_count);
                fill(p + i, R, G, B, MPP_FRAME_FMT_IS_BE(fmt));
            }
        }
    } break;
    default : {
        mpp_err_f("filling function do not support type %d\n", fmt);
        ret = MPP_NOK;
    } break;
    }
    return ret;
}
1313
parse_config_line(const char * str,OpsLine * info)1314 RK_S32 parse_config_line(const char *str, OpsLine *info)
1315 {
1316 RK_S32 cnt = sscanf(str, "%*[^,],%d,%[^,],%llu,%llu\n",
1317 &info->index, info->cmd,
1318 &info->value1, &info->value2);
1319
1320 return cnt;
1321 }
1322
/*
 * Copy the lower-cased extension of file_name (text after the last '.')
 * into extension. Writes an empty string when there is no '.'.
 *
 * Fixes vs the previous version:
 * - tolower() was called on a plain char; negative values (non-ASCII bytes
 *   with a signed char) are undefined behavior (CERT STR37-C). Cast to
 *   unsigned char first.
 * - an empty file_name used to form the pointer file_name - 1, which is
 *   undefined pointer arithmetic; strrchr avoids that entirely.
 *
 * NOTE: extension must be large enough for the extension plus the
 * terminating NUL; the caller is responsible for sizing it.
 */
static void get_extension(const char *file_name, char *extension)
{
    const char *dot = strrchr(file_name, '.');
    size_t i = 0;

    if (dot) {
        for (i = 0; dot[i + 1] != '\0'; i++)
            extension[i] = tolower((unsigned char)dot[i + 1]);
    }

    extension[i] = '\0';
}
1344
/* Mapping entry from a lower-case file extension to a frame format. */
typedef struct Ext2FrmFmt_t {
    const char *ext_name;       /* lower-case extension without the dot */
    MppFrameFormat format;      /* corresponding MppFrameFormat value */
} Ext2FrmFmt;

/* Lookup table used by name_to_frame_format(); extensions are unique. */
Ext2FrmFmt map_ext_to_frm_fmt[] = {
    { "yuv420p",    MPP_FMT_YUV420P, },
    { "yuv420sp",   MPP_FMT_YUV420SP, },
    { "yuv422p",    MPP_FMT_YUV422P, },
    { "yuv422sp",   MPP_FMT_YUV422SP, },
    { "yuv422uyvy", MPP_FMT_YUV422_UYVY, },
    { "yuv422vyuy", MPP_FMT_YUV422_VYUY, },
    { "yuv422yuyv", MPP_FMT_YUV422_YUYV, },
    { "yuv422yvyu", MPP_FMT_YUV422_YVYU, },

    { "abgr8888",   MPP_FMT_ABGR8888, },
    { "argb8888",   MPP_FMT_ARGB8888, },
    { "bgr565",     MPP_FMT_BGR565, },
    { "bgr888",     MPP_FMT_BGR888, },
    { "bgra8888",   MPP_FMT_BGRA8888, },
    { "rgb565",     MPP_FMT_RGB565, },
    { "rgb888",     MPP_FMT_RGB888, },
    { "rgba8888",   MPP_FMT_RGBA8888, },

    /* compressed layout: 420sp with AFBC v1 flag set */
    { "fbc",        MPP_FMT_YUV420SP | MPP_FRAME_FBC_AFBC_V1, },
};
1371
name_to_frame_format(const char * name,MppFrameFormat * fmt)1372 MPP_RET name_to_frame_format(const char *name, MppFrameFormat *fmt)
1373 {
1374 RK_U32 i;
1375 MPP_RET ret = MPP_NOK;
1376 char ext[50];
1377
1378 get_extension(name, ext);
1379
1380 for (i = 0; i < MPP_ARRAY_ELEMS(map_ext_to_frm_fmt); i++) {
1381 Ext2FrmFmt *info = &map_ext_to_frm_fmt[i];
1382
1383 if (!strcmp(ext, info->ext_name)) {
1384 *fmt = info->format;
1385 ret = MPP_OK;
1386 }
1387 }
1388
1389 return ret;
1390 }
1391
/* Mapping entry from a lower-case file extension to a coding type. */
typedef struct Ext2Coding_t {
    const char *ext_name;       /* lower-case extension without the dot */
    MppCodingType coding;       /* corresponding coding type */
} Ext2Coding;

/* Lookup table used by name_to_coding_type(); several aliases map to the
 * same codec, but each extension appears only once. */
Ext2Coding map_ext_to_coding[] = {
    { "h264",   MPP_VIDEO_CodingAVC, },
    { "264",    MPP_VIDEO_CodingAVC, },
    { "avc",    MPP_VIDEO_CodingAVC, },

    { "h265",   MPP_VIDEO_CodingHEVC, },
    { "265",    MPP_VIDEO_CodingHEVC, },
    { "hevc",   MPP_VIDEO_CodingHEVC, },

    { "jpg",    MPP_VIDEO_CodingMJPEG, },
    { "jpeg",   MPP_VIDEO_CodingMJPEG, },
    { "mjpeg",  MPP_VIDEO_CodingMJPEG, },
};
1410
name_to_coding_type(const char * name,MppCodingType * coding)1411 MPP_RET name_to_coding_type(const char *name, MppCodingType *coding)
1412 {
1413 RK_U32 i;
1414 MPP_RET ret = MPP_NOK;
1415 char ext[50];
1416
1417 get_extension(name, ext);
1418
1419 for (i = 0; i < MPP_ARRAY_ELEMS(map_ext_to_coding); i++) {
1420 Ext2Coding *info = &map_ext_to_coding[i];
1421
1422 if (!strcmp(ext, info->ext_name)) {
1423 *coding = info->coding;
1424 ret = MPP_OK;
1425 }
1426 }
1427
1428 return ret;
1429 }
1430
/* Internal state behind the opaque FpsCalc handle. */
typedef struct FpsCalcImpl_t {
    spinlock_t lock;            /* protects all counters below */
    FpsCalcCb callback;         /* optional per-second report callback */

    RK_S64 total_start;         /* time of the first fps_calc_inc() call (us) */
    RK_S64 total_count;         /* frames counted since total_start */

    RK_S64 last_start;          /* start time of the current 1s window (us) */
    RK_S64 last_count;          /* total_count snapshot at last_start */
} FpsCalcImpl;
1441
fps_calc_init(FpsCalc * ctx)1442 MPP_RET fps_calc_init(FpsCalc *ctx)
1443 {
1444 FpsCalcImpl *impl = mpp_calloc(FpsCalcImpl, 1);
1445 MPP_RET ret = MPP_NOK;
1446
1447 if (impl) {
1448 mpp_spinlock_init(&impl->lock);
1449 ret = MPP_OK;
1450 }
1451 *ctx = impl;
1452
1453 return ret;
1454 }
1455
fps_calc_deinit(FpsCalc ctx)1456 MPP_RET fps_calc_deinit(FpsCalc ctx)
1457 {
1458 MPP_FREE(ctx);
1459
1460 return MPP_OK;
1461 }
1462
fps_calc_set_cb(FpsCalc ctx,FpsCalcCb cb)1463 MPP_RET fps_calc_set_cb(FpsCalc ctx, FpsCalcCb cb)
1464 {
1465 FpsCalcImpl *impl = (FpsCalcImpl *)ctx;
1466
1467 if (impl)
1468 impl->callback = cb;
1469
1470 return MPP_OK;
1471 }
1472
/*
 * Count one frame. Roughly once per second the accumulated statistics are
 * snapshotted under the lock and reported through the callback (outside
 * the lock, so the callback may take its time).
 *
 * Tolerates a NULL handle. All times are in microseconds (mpp_time()).
 */
MPP_RET fps_calc_inc(FpsCalc ctx)
{
    FpsCalcImpl *impl = (FpsCalcImpl *)ctx;
    /* local snapshots; total_time stays 0 unless a 1s window elapsed */
    RK_S64 total_time = 0;
    RK_S64 total_count = 0;
    RK_S64 last_time = 0;
    RK_S64 last_count = 0;

    if (NULL == impl)
        return MPP_OK;

    mpp_spinlock_lock(&impl->lock);
    {
        RK_S64 now = mpp_time();
        if (!impl->total_count) {
            /* first frame: start both the total and the per-second window */
            impl->total_start = now;
            impl->last_start = now;
        } else {
            RK_S64 elapsed = now - impl->last_start;

            // print on each second
            if (elapsed >= 1000000) {
                /* capture stats for reporting, then open a new window */
                total_time = now - impl->total_start;
                total_count = impl->total_count;
                last_time = now - impl->last_start;
                last_count = impl->total_count - impl->last_count;

                impl->last_start = now;
                impl->last_count = impl->total_count;
            }
        }

        impl->total_count++;
    }
    mpp_spinlock_unlock(&impl->lock);

    /* report outside the lock; total_time != 0 only when a window closed.
     * NOTE(review): impl->callback is read without the lock — assumed to be
     * set once before counting starts; verify against callers. */
    if (impl->callback && total_time)
        impl->callback(total_time, total_count, last_time, last_count);

    return MPP_OK;
}
1514