/*
 * Copyright 2015 Rockchip Electronics Co. LTD
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define MODULE_TAG "utils"

#include <ctype.h>
#include <errno.h>
#include <limits.h>
#include <string.h>

#include "mpp_mem.h"
#include "mpp_log.h"
#include "mpp_lock.h"
#include "mpp_time.h"
#include "mpp_common.h"
#include "utils.h"

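/*
 * The checksum helpers below sum data as half-machine-word-wide unsigned
 * integers. CAL_BYTE is the byte width of one summed element (2 on 32-bit
 * builds, 4 on 64-bit builds) and MAX_HALF_WORD_SUM_CNT is the number of
 * such elements that can be accumulated into an RK_ULONG without overflow.
 */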
#define MAX_HALF_WORD_SUM_CNT \
    ((RK_ULONG)((0 - 1) / ((1UL << ((__SIZEOF_POINTER__ * 8) / 2)) - 1)))
#define CAL_BYTE (__SIZEOF_POINTER__ >> 1)

void _show_options(int count, OptionInfo *options)
{
    int i;
    for (i = 0; i < count; i++) {
        if (NULL == options[i].name)
            continue;

        mpp_log("-%s %-16s\t%s\n",
                options[i].name, options[i].argname, options[i].help);
    }
}

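/*
 * Unpack the n-th group of eight 10-bit pixels (10 packed bytes) from base
 * into eight 16-bit values at tmp_line + n * 16.
 */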
static void rearrange_pix(RK_U8 *tmp_line, RK_U8 *base, RK_U32 n)
{
    RK_U16 *pix = (RK_U16 *)(tmp_line + n * 16);
    RK_U16 *base_u16 = (RK_U16 *)(base + n * 10);

    pix[0] = base_u16[0] & 0x03FF;
    pix[1] = (base_u16[0] & 0xFC00) >> 10 | (base_u16[1] & 0x000F) << 6;
    pix[2] = (base_u16[1] & 0x3FF0) >> 4;
    pix[3] = (base_u16[1] & 0xC000) >> 14 | (base_u16[2] & 0x00FF) << 2;
    pix[4] = (base_u16[2] & 0xFF00) >> 8 | (base_u16[3] & 0x0003) << 8;
    pix[5] = (base_u16[3] & 0x0FFC) >> 2;
    pix[6] = (base_u16[3] & 0xF000) >> 12 | (base_u16[4] & 0x003F) << 4;
    pix[7] = (base_u16[4] & 0xFFC0) >> 6;
}

void dump_mpp_frame_to_file(MppFrame frame, FILE *fp)
{
    RK_U32 width = 0;
    RK_U32 height = 0;
    RK_U32 h_stride = 0;
    RK_U32 v_stride = 0;
    MppFrameFormat fmt = MPP_FMT_YUV420SP;
    MppBuffer buffer = NULL;
    RK_U8 *base = NULL;

    if (NULL == fp || NULL == frame)
        return ;

    width = mpp_frame_get_width(frame);
    height = mpp_frame_get_height(frame);
    h_stride = mpp_frame_get_hor_stride(frame);
    v_stride = mpp_frame_get_ver_stride(frame);
    fmt = mpp_frame_get_fmt(frame);
    buffer = mpp_frame_get_buffer(frame);

    if (NULL == buffer)
        return ;

    base = (RK_U8 *)mpp_buffer_get_ptr(buffer);

    if (MPP_FRAME_FMT_IS_FBC(fmt) || MPP_FRAME_FMT_IS_RKFBC(fmt)) {
        RK_S32 buf_size = mpp_buffer_get_size(buffer);

        fwrite(base, 1, buf_size, fp);
        return ;
    }

    if (MPP_FRAME_FMT_IS_RGB(fmt) && MPP_FRAME_FMT_IS_LE(fmt)) {
        fmt &= MPP_FRAME_FMT_MASK;
    }
    switch (fmt & MPP_FRAME_FMT_MASK) {
    case MPP_FMT_YUV422SP : {
        /* YUV422SP -> YUV422P for better display */
        RK_U32 i, j;
        RK_U8 *base_y = base;
        RK_U8 *base_c = base + h_stride * v_stride;
        RK_U8 *tmp = mpp_malloc(RK_U8, h_stride * height * 2);
        RK_U8 *tmp_u = tmp;
        RK_U8 *tmp_v = tmp + width * height / 2;

        for (i = 0; i < height; i++, base_y += h_stride)
            fwrite(base_y, 1, width, fp);

        for (i = 0; i < height; i++, base_c += h_stride) {
            for (j = 0; j < width / 2; j++) {
                tmp_u[j] = base_c[2 * j + 0];
                tmp_v[j] = base_c[2 * j + 1];
            }
            tmp_u += width / 2;
            tmp_v += width / 2;
        }

        fwrite(tmp, 1, width * height, fp);
        mpp_free(tmp);
    } break;
    case MPP_FMT_YUV420SP_VU :
    case MPP_FMT_YUV420SP : {
        RK_U32 i;
        RK_U8 *base_y = base;
        RK_U8 *base_c = base + h_stride * v_stride;

        for (i = 0; i < height; i++, base_y += h_stride) {
            fwrite(base_y, 1, width, fp);
        }
        for (i = 0; i < height / 2; i++, base_c += h_stride) {
            fwrite(base_c, 1, width, fp);
        }
    } break;
    case MPP_FMT_YUV420P : {
        RK_U32 i;
        RK_U8 *base_y = base;
        RK_U8 *base_c = base + h_stride * v_stride;

        for (i = 0; i < height; i++, base_y += h_stride) {
            fwrite(base_y, 1, width, fp);
        }
        for (i = 0; i < height / 2; i++, base_c += h_stride / 2) {
            fwrite(base_c, 1, width / 2, fp);
        }
        for (i = 0; i < height / 2; i++, base_c += h_stride / 2) {
            fwrite(base_c, 1, width / 2, fp);
        }
    } break;
    case MPP_FMT_YUV420SP_10BIT : {
        RK_U32 i, k;
        RK_U8 *base_y = base;
        RK_U8 *base_c = base + h_stride * v_stride;
        RK_U8 *tmp_line = (RK_U8 *)mpp_malloc(RK_U16, width);

        if (!tmp_line) {
            mpp_log("tmp_line malloc fail");
            return;
        }

        for (i = 0; i < height; i++, base_y += h_stride) {
            for (k = 0; k < MPP_ALIGN(width, 8) / 8; k++)
                rearrange_pix(tmp_line, base_y, k);
            fwrite(tmp_line, width * sizeof(RK_U16), 1, fp);
        }

        for (i = 0; i < height / 2; i++, base_c += h_stride) {
            for (k = 0; k < MPP_ALIGN(width, 8) / 8; k++)
                rearrange_pix(tmp_line, base_c, k);
            fwrite(tmp_line, width * sizeof(RK_U16), 1, fp);
        }

        MPP_FREE(tmp_line);
    } break;
    case MPP_FMT_YUV444SP : {
        /* YUV444SP -> YUV444P for better display */
        RK_U32 i, j;
        RK_U8 *base_y = base;
        RK_U8 *base_c = base + h_stride * v_stride;
        RK_U8 *tmp = mpp_malloc(RK_U8, h_stride * height * 2);
        RK_U8 *tmp_u = tmp;
        RK_U8 *tmp_v = tmp + width * height;

        for (i = 0; i < height; i++, base_y += h_stride)
            fwrite(base_y, 1, width, fp);

        for (i = 0; i < height; i++, base_c += h_stride * 2) {
            for (j = 0; j < width; j++) {
                tmp_u[j] = base_c[2 * j + 0];
                tmp_v[j] = base_c[2 * j + 1];
            }
            tmp_u += width;
            tmp_v += width;
        }

        fwrite(tmp, 1, width * height * 2, fp);
        mpp_free(tmp);
    } break;
    case MPP_FMT_YUV444SP_10BIT : {
        RK_U32 i, k;
        RK_U8 *base_y = base;
        RK_U8 *base_c = base + h_stride * v_stride;
        RK_U8 *tmp_line = (RK_U8 *)mpp_malloc(RK_U16, width);

        if (!tmp_line) {
            mpp_log("tmp_line malloc fail");
            return;
        }

        for (i = 0; i < height; i++, base_y += h_stride) {
            for (k = 0; k < MPP_ALIGN(width, 8) / 8; k++)
                rearrange_pix(tmp_line, base_y, k);
            fwrite(tmp_line, width * sizeof(RK_U16), 1, fp);
        }

        for (i = 0; i < (height * 2); i++, base_c += h_stride) {
            for (k = 0; k < MPP_ALIGN(width, 8) / 8; k++)
                rearrange_pix(tmp_line, base_c, k);
            fwrite(tmp_line, width * sizeof(RK_U16), 1, fp);
        }

        MPP_FREE(tmp_line);
    } break;
    case MPP_FMT_YUV400 : {
        RK_U32 i;
        RK_U8 *base_y = base;
        RK_U8 *tmp = mpp_malloc(RK_U8, h_stride * height);

        for (i = 0; i < height; i++, base_y += h_stride)
            fwrite(base_y, 1, width, fp);

        mpp_free(tmp);
    } break;
    case MPP_FMT_ARGB8888 :
    case MPP_FMT_ABGR8888 :
    case MPP_FMT_BGRA8888 :
    case MPP_FMT_RGBA8888 : {
        RK_U32 i;
        RK_U8 *base_y = base;
        RK_U8 *tmp = mpp_malloc(RK_U8, width * height * 4);

        for (i = 0; i < height; i++, base_y += h_stride)
            fwrite(base_y, 1, width * 4, fp);

        mpp_free(tmp);
    } break;
    case MPP_FMT_YUV422_YUYV :
    case MPP_FMT_YUV422_YVYU :
    case MPP_FMT_YUV422_UYVY :
    case MPP_FMT_YUV422_VYUY :
    case MPP_FMT_RGB565 :
    case MPP_FMT_BGR565 :
    case MPP_FMT_RGB555 :
    case MPP_FMT_BGR555 :
    case MPP_FMT_RGB444 :
    case MPP_FMT_BGR444 : {
        RK_U32 i;
        RK_U8 *base_y = base;
        RK_U8 *tmp = mpp_malloc(RK_U8, width * height * 2);

        for (i = 0; i < height; i++, base_y += h_stride)
            fwrite(base_y, 1, width * 2, fp);

        mpp_free(tmp);
    } break;
    case MPP_FMT_RGB888 : {
        RK_U32 i;
        RK_U8 *base_y = base;
        RK_U8 *tmp = mpp_malloc(RK_U8, width * height * 3);

        for (i = 0; i < height; i++, base_y += h_stride)
            fwrite(base_y, 1, width * 3, fp);

        mpp_free(tmp);
    } break;
    default : {
        mpp_err("not supported format %d\n", fmt);
    } break;
    }
}

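/*
 * Accumulate the byte buffer into *sum, reading it as half-machine-word
 * unsigned integers plus a byte-wise tail. Callers keep len within
 * MAX_HALF_WORD_SUM_CNT * CAL_BYTE so the sum cannot overflow.
 */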
void wide_bit_sum(RK_U8 *data, RK_U32 len, RK_ULONG *sum)
{
    RK_U8 *data8 = NULL;
    RK_U32 loop;
    data8 = data;
#if LONG_MAX == INT_MAX
    RK_U16 *data_rk = NULL;
    data_rk = (RK_U16 *)data;
#else
    RK_U32 *data_rk = NULL;
    data_rk = (RK_U32 *)data;
#endif

    for (loop = 0; loop < len / CAL_BYTE; loop++) {
        *sum += data_rk[loop];
    }
    for (loop = len / CAL_BYTE * CAL_BYTE; loop < len; loop++) {
        *sum += data8[loop];
    }

    return;
}

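/*
 * Compute a DataCrc over the buffer: per-group wide sums (each group sized
 * so its sum fits in an RK_ULONG) plus a 32-bit XOR across the whole data.
 */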
void calc_data_crc(RK_U8 *dat, RK_U32 len, DataCrc *crc)
{
    RK_ULONG data_grp_byte_cnt = MAX_HALF_WORD_SUM_CNT * CAL_BYTE;
    RK_U32 i = 0, grp_loop = 0;
    RK_U8 *dat8 = NULL;
    RK_U32 *dat32 = NULL;
    RK_U32 xor = 0;

    /* calc sum */
    crc->sum_cnt = (len + data_grp_byte_cnt - 1) / data_grp_byte_cnt;
    for (grp_loop = 0; grp_loop < len / data_grp_byte_cnt; grp_loop++) {
        wide_bit_sum(&dat[grp_loop * data_grp_byte_cnt], data_grp_byte_cnt, &crc->sum[grp_loop]);
    }
    if (len % data_grp_byte_cnt) {
        wide_bit_sum(&dat[grp_loop * data_grp_byte_cnt], len % data_grp_byte_cnt, &crc->sum[grp_loop]);
    }

    /* calc xor */
    dat32 = (RK_U32 *)dat;
    for (i = 0; i < len / 4; i++)
        xor ^= dat32[i];

    if (len % 4) {
        RK_U32 val = 0;
        dat8 = (RK_U8 *)&val;
        for (i = (len / 4) * 4; i < len; i++)
            dat8[i % 4] = dat[i];
        xor ^= val;
    }

    crc->len = len;
    crc->vor = xor;
}

void write_data_crc(FILE *fp, DataCrc *crc)
{
    RK_U32 loop = 0;

    if (fp) {
        fprintf(fp, "%08d,", crc->len);
        for (loop = 0; loop < crc->sum_cnt; loop++) {
            fprintf(fp, " %lx,", crc->sum[loop]);
        }
        fprintf(fp, " %08x\n", crc->vor);
        fflush(fp);
    }
}

void read_data_crc(FILE *fp, DataCrc *crc)
{
    RK_U32 loop = 0;

    if (fp) {
        RK_S32 ret = 0;
        ret = fscanf(fp, "%8d", &crc->len);
        for (loop = 0; loop < crc->sum_cnt; loop++) {
            ret |= fscanf(fp, "%lx", &crc->sum[loop]);
        }
        ret |= fscanf(fp, "%08x", &crc->vor);
        if (ret == EOF)
            mpp_err_f("unexpected EOF found\n");
    }
}

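/*
 * Frame checksum: run the same sum/xor scheme per plane, grouping
 * grp_line_cnt rows per sum so each group stays below the overflow bound.
 * Chroma is assumed to be interleaved at half height (NV12-like layout).
 */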
void calc_frm_crc(MppFrame frame, FrmCrc *crc)
{
    RK_ULONG data_grp_byte_cnt = MAX_HALF_WORD_SUM_CNT * CAL_BYTE;
    RK_U32 grp_line_cnt = 0;
    RK_U32 grp_cnt = 0;

    RK_U32 y = 0, x = 0;
    RK_U8 *dat8 = NULL;
    RK_U32 *dat32 = NULL;
    RK_U32 xor = 0;

    RK_U32 width = mpp_frame_get_width(frame);
    RK_U32 height = mpp_frame_get_height(frame);
    RK_U32 stride = mpp_frame_get_hor_stride(frame);
    RK_U8 *buf = (RK_U8 *)mpp_buffer_get_ptr(mpp_frame_get_buffer(frame));

    grp_line_cnt = data_grp_byte_cnt / ((width + CAL_BYTE - 1) / CAL_BYTE * CAL_BYTE);

    /* luma */
    grp_cnt = (height + grp_line_cnt - 1) / grp_line_cnt;
    crc->luma.sum_cnt = grp_cnt;

    dat8 = buf;
    for (y = 0; y < height / grp_line_cnt * grp_line_cnt; y++) {
        wide_bit_sum(&dat8[y * stride], width, &crc->luma.sum[y / grp_line_cnt]);
    }
    if (height % grp_line_cnt) {
        for (y = height / grp_line_cnt * grp_line_cnt; y < height; y++) {
            wide_bit_sum(&dat8[y * stride], width, &crc->luma.sum[y / grp_line_cnt]);
        }
    }

    dat8 = buf;
    for (y = 0; y < height; y++) {
        dat32 = (RK_U32 *)&dat8[y * stride];
        for (x = 0; x < width / 4; x++)
            xor ^= dat32[x];
    }
    crc->luma.len = height * width;
    crc->luma.vor = xor;

    /* chroma */
    grp_cnt = (height / 2 + grp_line_cnt - 1) / grp_line_cnt;
    crc->chroma.sum_cnt = grp_cnt;

    dat8 = buf + height * stride;
    for (y = 0; y < height / 2 / grp_line_cnt * grp_line_cnt; y++) {
        wide_bit_sum(&dat8[y * stride], width, &crc->chroma.sum[y / grp_line_cnt]);
    }
    if (height / 2 % grp_line_cnt) {
        for (y = height / 2 / grp_line_cnt * grp_line_cnt; y < height / 2; y++) {
            wide_bit_sum(&dat8[y * stride], width, &crc->chroma.sum[y / grp_line_cnt]);
        }
    }

    dat8 = buf + height * stride;
    for (y = 0; y < height / 2; y++) {
        dat32 = (RK_U32 *)&dat8[y * stride];
        for (x = 0; x < width / 4; x++)
            xor ^= dat32[x];
    }
    crc->chroma.len = height * width / 2;
    crc->chroma.vor = xor;
}

void write_frm_crc(FILE *fp, FrmCrc *crc)
{
    RK_U32 loop = 0;

    if (fp) {
        // luma
        fprintf(fp, "%d,", crc->luma.len);
        for (loop = 0; loop < crc->luma.sum_cnt; loop++) {
            fprintf(fp, " %lx,", crc->luma.sum[loop]);
        }
        fprintf(fp, " %08x,", crc->luma.vor);

        // chroma
        fprintf(fp, " %d,", crc->chroma.len);
        for (loop = 0; loop < crc->chroma.sum_cnt; loop++) {
            fprintf(fp, " %lx,", crc->chroma.sum[loop]);
        }
        fprintf(fp, " %08x\n", crc->chroma.vor);

        fflush(fp);
    }
}

void read_frm_crc(FILE *fp, FrmCrc *crc)
{
    RK_U32 loop = 0;

    if (fp) {
        RK_S32 ret = 0;
        // luma
        ret = fscanf(fp, "%d", &crc->luma.len);
        for (loop = 0; loop < crc->luma.sum_cnt; loop++) {
            ret |= fscanf(fp, "%lx", &crc->luma.sum[loop]);
        }
        ret |= fscanf(fp, "%08x", &crc->luma.vor);

        // chroma
        ret |= fscanf(fp, "%d", &crc->chroma.len);
        for (loop = 0; loop < crc->chroma.sum_cnt; loop++) {
            ret |= fscanf(fp, "%lx", &crc->chroma.sum[loop]);
        }
        ret |= fscanf(fp, "%08x", &crc->chroma.vor);
        if (ret == EOF)
            mpp_err_f("unexpected EOF found\n");
    }
}

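/*
 * Read height rows of width * pix_w bytes from fp into a buffer with
 * hor_stride bytes per row. Used for the packed formats handled below.
 */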
static MPP_RET read_with_pixel_width(RK_U8 *buf, RK_S32 width, RK_S32 height,
                                     RK_S32 hor_stride, RK_S32 pix_w, FILE *fp)
{
    RK_S32 row;
    MPP_RET ret = MPP_OK;

    if (hor_stride < width * pix_w) {
        mpp_err_f("invalid %d-bit color config: hor_stride %d is smaller than width %d multiplied by pixel size %d\n",
                  8 * pix_w, hor_stride, width, pix_w);
        mpp_err_f("width should be defined by pixel count\n");
        mpp_err_f("stride should be defined by byte count\n");

        hor_stride = width * pix_w;
    }

    for (row = 0; row < height; row++) {
        RK_S32 read_size = fread(buf + row * hor_stride, 1, width * pix_w, fp);
        if (feof(fp)) {
            ret = MPP_NOK;
            break;
        }
        if (read_size != width * pix_w) {
            mpp_err_f("read file failed: expected %d bytes but got %d\n",
                      width * pix_w, read_size);
            ret = MPP_NOK;
        }
    }

    return ret;
}

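/*
 * Read one raw frame from fp into buf. For FBC input the compressed header
 * block is read first (4K aligned for AFBC v1), then the aligned payload;
 * for raster formats the planes or packed rows are read according to the
 * given width and strides.
 */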
MPP_RET read_image(RK_U8 *buf, FILE *fp, RK_U32 width, RK_U32 height,
                   RK_U32 hor_stride, RK_U32 ver_stride, MppFrameFormat fmt)
{
    MPP_RET ret = MPP_OK;
    RK_U32 read_size;
    RK_U32 row = 0;
    RK_U8 *buf_y = buf;
    RK_U8 *buf_u = buf_y + hor_stride * ver_stride; // NOTE: diff from gen_yuv_image
    RK_U8 *buf_v = buf_u + hor_stride * ver_stride / 4; // NOTE: diff from gen_yuv_image

    if (MPP_FRAME_FMT_IS_FBC(fmt)) {
        RK_U32 align_w = MPP_ALIGN(width, 16);
        RK_U32 align_h = MPP_ALIGN(height, 16);
        RK_U32 header_size = 0;

        if ((fmt & MPP_FRAME_FBC_MASK) == MPP_FRAME_FBC_AFBC_V1)
            header_size = MPP_ALIGN(align_w * align_h / 16, SZ_4K);
        else
            header_size = align_w * align_h / 16;

        /* read fbc header first */
        read_size = fread(buf, 1, header_size, fp);
        if (read_size != header_size) {
            mpp_err_f("read fbc file header failed %d vs %d\n",
                      read_size, header_size);
            ret = MPP_NOK;
            goto err;
        }
        buf += header_size;

        switch (fmt & MPP_FRAME_FMT_MASK) {
        case MPP_FMT_YUV420SP : {
            read_size = fread(buf, 1, align_w * align_h * 3 / 2, fp);
            if (read_size != align_w * align_h * 3 / 2) {
                mpp_err_f("read 420sp fbc file payload failed %d vs %d\n",
                          read_size, align_w * align_h * 3 / 2);
                ret = MPP_NOK;
                goto err;
            }
        } break;
        case MPP_FMT_YUV422SP :
        case MPP_FMT_YUV422_YUYV :
        case MPP_FMT_YUV422_YVYU :
        case MPP_FMT_YUV422_UYVY :
        case MPP_FMT_YUV422_VYUY : {
            read_size = fread(buf, 1, align_w * align_h * 2, fp);
            if (read_size != align_w * align_h * 2) {
                mpp_err_f("read 422sp fbc file payload failed %d vs %d\n",
                          read_size, align_w * align_h * 2);
                ret = MPP_NOK;
                goto err;
            }
        } break;
        default : {
            mpp_err_f("not supported fbc format %x\n", fmt);
        } break;
        }

        return MPP_OK;
    }

    switch (fmt & MPP_FRAME_FMT_MASK) {
    case MPP_FMT_YUV420SP_VU :
    case MPP_FMT_YUV420SP : {
        for (row = 0; row < height; row++) {
            read_size = fread(buf_y + row * hor_stride, 1, width, fp);
            if (read_size != width) {
                ret = MPP_NOK;
                goto err;
            }
        }

        height = MPP_ALIGN(height, 2);
        width = MPP_ALIGN(width, 2);
        for (row = 0; row < height / 2; row++) {
            read_size = fread(buf_u + row * hor_stride, 1, width, fp);
            if (read_size != width) {
                ret = MPP_NOK;
                goto err;
            }
        }
    } break;
    case MPP_FMT_YUV420P : {
        for (row = 0; row < height; row++) {
            read_size = fread(buf_y + row * hor_stride, 1, width, fp);
            if (read_size != width) {
                ret = MPP_NOK;
                goto err;
            }
        }

        width = MPP_ALIGN(width, 2);
        height = MPP_ALIGN(height, 2);
        for (row = 0; row < height / 2; row++) {
            read_size = fread(buf_u + row * hor_stride / 2, 1, width / 2, fp);
            if (read_size != width / 2) {
                ret = MPP_NOK;
                goto err;
            }
        }

        for (row = 0; row < height / 2; row++) {
            read_size = fread(buf_v + row * hor_stride / 2, 1, width / 2, fp);
            if (read_size != width / 2) {
                ret = MPP_NOK;
                goto err;
            }
        }
    } break;
    case MPP_FMT_ARGB8888 :
    case MPP_FMT_ABGR8888 :
    case MPP_FMT_BGRA8888 :
    case MPP_FMT_RGBA8888 :
    case MPP_FMT_RGB101010 :
    case MPP_FMT_BGR101010 : {
        ret = read_with_pixel_width(buf_y, width, height, hor_stride, 4, fp);
    } break;
    case MPP_FMT_YUV422P :
    case MPP_FMT_YUV422SP :
    case MPP_FMT_YUV422SP_VU :
    case MPP_FMT_BGR444 :
    case MPP_FMT_RGB444 :
    case MPP_FMT_RGB555 :
    case MPP_FMT_BGR555 :
    case MPP_FMT_RGB565 :
    case MPP_FMT_BGR565 :
    case MPP_FMT_YUV422_YUYV :
    case MPP_FMT_YUV422_YVYU :
    case MPP_FMT_YUV422_UYVY :
    case MPP_FMT_YUV422_VYUY : {
        ret = read_with_pixel_width(buf_y, width, height, hor_stride, 2, fp);
    } break;
    case MPP_FMT_YUV444SP :
    case MPP_FMT_YUV444P :
    case MPP_FMT_RGB888 :
    case MPP_FMT_BGR888 : {
        ret = read_with_pixel_width(buf_y, width, height, hor_stride, 3, fp);
    } break;
    case MPP_FMT_YUV400 : {
        ret = read_with_pixel_width(buf_y, width, height, hor_stride, 1, fp);
    } break;
    default : {
        mpp_err_f("read image does not support fmt %d\n", fmt);
        ret = MPP_ERR_VALUE;
    } break;
    }

err:

    return ret;
}

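/*
 * The fill_MPP_FMT_* helpers below generate a deterministic test pattern
 * that shifts with frame_count, so encoded output can be checked visually.
 */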
static void fill_MPP_FMT_YUV420SP(RK_U8 *buf, RK_U32 width, RK_U32 height,
                                  RK_U32 hor_stride, RK_U32 ver_stride,
                                  RK_U32 frame_count)
{
    // MPP_FMT_YUV420SP = ff: nv12
    // https://www.fourcc.org/pixel-format/yuv-nv12/
    RK_U8 *p = buf;
    RK_U32 x, y;

    for (y = 0; y < height; y++, p += hor_stride) {
        for (x = 0; x < width; x++) {
            p[x] = x + y + frame_count * 3;
        }
    }

    p = buf + hor_stride * ver_stride;
    for (y = 0; y < height / 2; y++, p += hor_stride) {
        for (x = 0; x < width / 2; x++) {
            p[x * 2 + 0] = 128 + y + frame_count * 2;
            p[x * 2 + 1] = 64 + x + frame_count * 5;
        }
    }
}

static void fill_MPP_FMT_YUV422SP(RK_U8 *buf, RK_U32 width, RK_U32 height,
                                  RK_U32 hor_stride, RK_U32 ver_stride,
                                  RK_U32 frame_count)
{
    // MPP_FMT_YUV422SP = ff: nv16
    // not valid in www.fourcc.org
    RK_U8 *p = buf;
    RK_U32 x, y;

    for (y = 0; y < height; y++, p += hor_stride) {
        for (x = 0; x < width; x++) {
            p[x] = x + y + frame_count * 3;
        }
    }

    p = buf + hor_stride * ver_stride;
    for (y = 0; y < height; y++, p += hor_stride) {
        for (x = 0; x < width / 2; x++) {
            p[x * 2 + 0] = 128 + y / 2 + frame_count * 2;
            p[x * 2 + 1] = 64 + x + frame_count * 5;
        }
    }
}

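/*
 * Pick the RGB color for pixel (x, y) of frame frm_cnt: the first three
 * frames are solid red, green and blue; later frames convert the moving
 * YUV pattern above to RGB with approximate fixed-point coefficients.
 */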
static void get_rgb_color(RK_U32 *R, RK_U32 *G, RK_U32 *B, RK_S32 x, RK_S32 y, RK_S32 frm_cnt)
{
    // frame 0 -> red
    if (frm_cnt == 0) {
        R[0] = 0xff;
        G[0] = 0;
        B[0] = 0;
        return ;
    }

    // frame 1 -> green
    if (frm_cnt == 1) {
        R[0] = 0;
        G[0] = 0xff;
        B[0] = 0;
        return ;
    }

    // frame 2 -> blue
    if (frm_cnt == 2) {
        R[0] = 0;
        G[0] = 0;
        B[0] = 0xff;
        return ;
    }

    // moving color bar
    RK_U8 Y = (0 + x + y + frm_cnt * 3);
    RK_U8 U = (128 + (y / 2) + frm_cnt * 2);
    RK_U8 V = (64 + (x / 2) + frm_cnt * 5);

    RK_S32 _R = Y + ((360 * (V - 128)) >> 8);
    RK_S32 _G = Y - (((88 * (U - 128) + 184 * (V - 128))) >> 8);
    RK_S32 _B = Y + ((455 * (U - 128)) >> 8);

    R[0] = MPP_CLIP3(0, 255, _R);
    G[0] = MPP_CLIP3(0, 255, _G);
    B[0] = MPP_CLIP3(0, 255, _B);
}

static void fill_MPP_FMT_RGB565(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_RGB565 = ff: rgb565be
    // 16 bit pixel MSB --------> LSB
    //               (rrrr,rggg,gggb,bbbb)
    // big endian    | byte 0 | byte 1 |
    // little endian | byte 1 | byte 0 |
    RK_U16 val = (((R >> 3) & 0x1f) << 11) |
                 (((G >> 2) & 0x3f) << 5) |
                 (((B >> 3) & 0x1f) << 0);
    if (be) {
        p[0] = (val >> 8) & 0xff;
        p[1] = (val >> 0) & 0xff;
    } else {
        p[0] = (val >> 0) & 0xff;
        p[1] = (val >> 8) & 0xff;
    }
}

static void fill_MPP_FMT_BGR565(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_BGR565 = ff: bgr565be
    // 16 bit pixel MSB --------> LSB
    //               (bbbb,bggg,gggr,rrrr)
    // big endian    | byte 0 | byte 1 |
    // little endian | byte 1 | byte 0 |
    RK_U16 val = (((R >> 3) & 0x1f) << 0) |
                 (((G >> 2) & 0x3f) << 5) |
                 (((B >> 3) & 0x1f) << 11);
    if (be) {
        p[0] = (val >> 8) & 0xff;
        p[1] = (val >> 0) & 0xff;
    } else {
        p[0] = (val >> 0) & 0xff;
        p[1] = (val >> 8) & 0xff;
    }
}

static void fill_MPP_FMT_RGB555(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_RGB555 = ff: rgb555be
    // 16 bit pixel MSB --------> LSB
    //               (0rrr,rrgg,gggb,bbbb)
    // big endian    | byte 0 | byte 1 |
    // little endian | byte 1 | byte 0 |
    RK_U16 val = (((R >> 3) & 0x1f) << 10) |
                 (((G >> 3) & 0x1f) << 5) |
                 (((B >> 3) & 0x1f) << 0);
    if (be) {
        p[0] = (val >> 8) & 0xff;
        p[1] = (val >> 0) & 0xff;
    } else {
        p[0] = (val >> 0) & 0xff;
        p[1] = (val >> 8) & 0xff;
    }
}

static void fill_MPP_FMT_BGR555(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_BGR555 = ff: bgr555be
    // 16 bit pixel MSB --------> LSB
    //               (0bbb,bbgg,gggr,rrrr)
    // big endian    | byte 0 | byte 1 |
    // little endian | byte 1 | byte 0 |
    RK_U16 val = (((R >> 3) & 0x1f) << 0) |
                 (((G >> 3) & 0x1f) << 5) |
                 (((B >> 3) & 0x1f) << 10);
    if (be) {
        p[0] = (val >> 8) & 0xff;
        p[1] = (val >> 0) & 0xff;
    } else {
        p[0] = (val >> 0) & 0xff;
        p[1] = (val >> 8) & 0xff;
    }
}

static void fill_MPP_FMT_RGB444(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_RGB444 = ff: rgb444be
    // 16 bit pixel MSB --------> LSB
    //               (0000,rrrr,gggg,bbbb)
    // big endian    | byte 0 | byte 1 |
    // little endian | byte 1 | byte 0 |
    RK_U16 val = (((R >> 4) & 0xf) << 8) |
                 (((G >> 4) & 0xf) << 4) |
                 (((B >> 4) & 0xf) << 0);
    if (be) {
        p[0] = (val >> 8) & 0xff;
        p[1] = (val >> 0) & 0xff;
    } else {
        p[0] = (val >> 0) & 0xff;
        p[1] = (val >> 8) & 0xff;
    }
}

static void fill_MPP_FMT_BGR444(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_BGR444 = ff: bgr444be
    // 16 bit pixel MSB --------> LSB
    //               (0000,bbbb,gggg,rrrr)
    // big endian    | byte 0 | byte 1 |
    // little endian | byte 1 | byte 0 |
    RK_U16 val = (((R >> 4) & 0xf) << 0) |
                 (((G >> 4) & 0xf) << 4) |
                 (((B >> 4) & 0xf) << 8);
    if (be) {
        p[0] = (val >> 8) & 0xff;
        p[1] = (val >> 0) & 0xff;
    } else {
        p[0] = (val >> 0) & 0xff;
        p[1] = (val >> 8) & 0xff;
    }
}

static void fill_MPP_FMT_RGB888(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_RGB888
    // 24 bit pixel MSB --------> LSB
    //               (rrrr,rrrr,gggg,gggg,bbbb,bbbb)
    // big endian    | byte 0 | byte 1 | byte 2 |
    // little endian | byte 2 | byte 1 | byte 0 |
    if (be) {
        p[0] = R;
        p[1] = G;
        p[2] = B;
    } else {
        p[0] = B;
        p[1] = G;
        p[2] = R;
    }
}

static void fill_MPP_FMT_BGR888(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_BGR888
    // 24 bit pixel MSB --------> LSB
    //               (bbbb,bbbb,gggg,gggg,rrrr,rrrr)
    // big endian    | byte 0 | byte 1 | byte 2 |
    // little endian | byte 2 | byte 1 | byte 0 |
    if (be) {
        p[0] = B;
        p[1] = G;
        p[2] = R;
    } else {
        p[0] = R;
        p[1] = G;
        p[2] = B;
    }
}

static void fill_MPP_FMT_RGB101010(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_RGB101010
    // 32 bit pixel MSB --------> LSB
    //               (00rr,rrrr,rrrr,gggg,gggg,ggbb,bbbb,bbbb)
    // big endian    | byte 0 | byte 1 | byte 2 | byte 3 |
    // little endian | byte 3 | byte 2 | byte 1 | byte 0 |
    RK_U32 val = (((R * 4) & 0x3ff) << 20) |
                 (((G * 4) & 0x3ff) << 10) |
                 (((B * 4) & 0x3ff) << 0);
    if (be) {
        p[0] = (val >> 24) & 0xff;
        p[1] = (val >> 16) & 0xff;
        p[2] = (val >> 8) & 0xff;
        p[3] = (val >> 0) & 0xff;
    } else {
        p[0] = (val >> 0) & 0xff;
        p[1] = (val >> 8) & 0xff;
        p[2] = (val >> 16) & 0xff;
        p[3] = (val >> 24) & 0xff;
    }
}

static void fill_MPP_FMT_BGR101010(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_BGR101010
    // 32 bit pixel MSB --------> LSB
    //               (00bb,bbbb,bbbb,gggg,gggg,ggrr,rrrr,rrrr)
    // big endian    | byte 0 | byte 1 | byte 2 | byte 3 |
    // little endian | byte 3 | byte 2 | byte 1 | byte 0 |
    RK_U32 val = (((R * 4) & 0x3ff) << 0) |
                 (((G * 4) & 0x3ff) << 10) |
                 (((B * 4) & 0x3ff) << 20);
    if (be) {
        p[0] = (val >> 24) & 0xff;
        p[1] = (val >> 16) & 0xff;
        p[2] = (val >> 8) & 0xff;
        p[3] = (val >> 0) & 0xff;
    } else {
        p[0] = (val >> 0) & 0xff;
        p[1] = (val >> 8) & 0xff;
        p[2] = (val >> 16) & 0xff;
        p[3] = (val >> 24) & 0xff;
    }
}

static void fill_MPP_FMT_ARGB8888(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_ARGB8888
    // 32 bit pixel MSB --------> LSB
    //               (XXXX,XXXX,rrrr,rrrr,gggg,gggg,bbbb,bbbb)
    // big endian    | byte 0 | byte 1 | byte 2 | byte 3 |
    // little endian | byte 3 | byte 2 | byte 1 | byte 0 |
    if (be) {
        p[0] = 0xff;
        p[1] = R;
        p[2] = G;
        p[3] = B;
    } else {
        p[0] = B;
        p[1] = G;
        p[2] = R;
        p[3] = 0xff;
    }
}

static void fill_MPP_FMT_ABGR8888(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_ABGR8888
    // 32 bit pixel MSB --------> LSB
    //               (XXXX,XXXX,bbbb,bbbb,gggg,gggg,rrrr,rrrr)
    // big endian    | byte 0 | byte 1 | byte 2 | byte 3 |
    // little endian | byte 3 | byte 2 | byte 1 | byte 0 |
    if (be) {
        p[0] = 0xff;
        p[1] = B;
        p[2] = G;
        p[3] = R;
    } else {
        p[0] = R;
        p[1] = G;
        p[2] = B;
        p[3] = 0xff;
    }
}

static void fill_MPP_FMT_BGRA8888(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_BGRA8888
    // 32 bit pixel MSB --------> LSB
    //               (bbbb,bbbb,gggg,gggg,rrrr,rrrr,XXXX,XXXX)
    // big endian    | byte 0 | byte 1 | byte 2 | byte 3 |
    // little endian | byte 3 | byte 2 | byte 1 | byte 0 |
    if (be) {
        p[0] = B;
        p[1] = G;
        p[2] = R;
        p[3] = 0xff;
    } else {
        p[0] = 0xff;
        p[1] = R;
        p[2] = G;
        p[3] = B;
    }
}

static void fill_MPP_FMT_RGBA8888(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be)
{
    // MPP_FMT_RGBA8888
    // 32 bit pixel MSB --------> LSB
    //               (rrrr,rrrr,gggg,gggg,bbbb,bbbb,XXXX,XXXX)
    // big endian    | byte 0 | byte 1 | byte 2 | byte 3 |
    // little endian | byte 3 | byte 2 | byte 1 | byte 0 |
    if (be) {
        p[0] = R;
        p[1] = G;
        p[2] = B;
        p[3] = 0xff;
    } else {
        p[0] = 0xff;
        p[1] = B;
        p[2] = G;
        p[3] = R;
    }
}

typedef void (*FillRgbFunc)(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be);

FillRgbFunc fill_rgb_funcs[] = {
    fill_MPP_FMT_RGB565,
    fill_MPP_FMT_BGR565,
    fill_MPP_FMT_RGB555,
    fill_MPP_FMT_BGR555,
    fill_MPP_FMT_RGB444,
    fill_MPP_FMT_BGR444,
    fill_MPP_FMT_RGB888,
    fill_MPP_FMT_BGR888,
    fill_MPP_FMT_RGB101010,
    fill_MPP_FMT_BGR101010,
    fill_MPP_FMT_ARGB8888,
    fill_MPP_FMT_ABGR8888,
    fill_MPP_FMT_BGRA8888,
    fill_MPP_FMT_RGBA8888,
};

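/*
 * One-shot warning helpers for fill_image(): detect a stride that was given
 * in pixels instead of bytes, and a byte stride that is not 8-pixel aligned
 * as the vepu requires. Each returns 1 once the workaround is triggered.
 */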
static RK_S32 util_check_stride_by_pixel(RK_S32 workaround, RK_S32 width,
                                         RK_S32 hor_stride, RK_S32 pixel_size)
{
    if (!workaround && hor_stride < width * pixel_size) {
        mpp_log("warning: stride in bytes %d is smaller than width %d multiplied by pixel size %d\n",
                hor_stride, width, pixel_size);
        mpp_log("multiply stride %d by pixel size %d and set new byte stride to %d\n",
                hor_stride, pixel_size, hor_stride * pixel_size);
        workaround = 1;
    }

    return workaround;
}

static RK_S32 util_check_8_pixel_aligned(RK_S32 workaround, RK_S32 hor_stride,
                                         RK_S32 pixel_align, RK_S32 pixel_size,
                                         const char *fmt_name)
{
    if (!workaround && hor_stride != MPP_ALIGN_GEN(hor_stride, pixel_align * pixel_size)) {
        mpp_log("warning: vepu only supports 8-pixel aligned horizontal stride for %s with pixel size %d\n",
                fmt_name, pixel_size);
        mpp_log("set byte stride to %d to match the requirement\n",
                MPP_ALIGN_GEN(hor_stride, pixel_align * pixel_size));
        workaround = 1;
    }

    return workaround;
}

MPP_RET fill_image(RK_U8 *buf, RK_U32 width, RK_U32 height,
                   RK_U32 hor_stride, RK_U32 ver_stride, MppFrameFormat fmt,
                   RK_U32 frame_count)
{
    MPP_RET ret = MPP_OK;
    RK_U8 *buf_y = buf;
    RK_U8 *buf_c = buf + hor_stride * ver_stride;
    RK_U32 x, y, i;
    static RK_S32 is_pixel_stride = 0;
    static RK_S32 not_8_pixel = 0;

    switch (fmt & MPP_FRAME_FMT_MASK) {
    case MPP_FMT_YUV420SP : {
        fill_MPP_FMT_YUV420SP(buf, width, height, hor_stride, ver_stride, frame_count);
    } break;
    case MPP_FMT_YUV422SP : {
        fill_MPP_FMT_YUV422SP(buf, width, height, hor_stride, ver_stride, frame_count);
    } break;
    case MPP_FMT_YUV420P : {
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = x + y + frame_count * 3;
            }
        }

        p = buf_c;
        for (y = 0; y < height / 2; y++, p += hor_stride / 2) {
            for (x = 0; x < width / 2; x++) {
                p[x] = 128 + y + frame_count * 2;
            }
        }

        p = buf_c + hor_stride * ver_stride / 4;
        for (y = 0; y < height / 2; y++, p += hor_stride / 2) {
            for (x = 0; x < width / 2; x++) {
                p[x] = 64 + x + frame_count * 5;
            }
        }
    } break;
    case MPP_FMT_YUV420SP_VU : {
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = x + y + frame_count * 3;
            }
        }

        p = buf_c;
        for (y = 0; y < height / 2; y++, p += hor_stride) {
            for (x = 0; x < width / 2; x++) {
                p[x * 2 + 1] = 128 + y + frame_count * 2;
                p[x * 2 + 0] = 64 + x + frame_count * 5;
            }
        }
    } break;
    case MPP_FMT_YUV422P : {
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = x + y + frame_count * 3;
            }
        }

        p = buf_c;
        for (y = 0; y < height; y++, p += hor_stride / 2) {
            for (x = 0; x < width / 2; x++) {
                p[x] = 128 + y / 2 + frame_count * 2;
            }
        }

        p = buf_c + hor_stride * ver_stride / 2;
        for (y = 0; y < height; y++, p += hor_stride / 2) {
            for (x = 0; x < width / 2; x++) {
                p[x] = 64 + x + frame_count * 5;
            }
        }
    } break;
    case MPP_FMT_YUV422SP_VU : {
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = x + y + frame_count * 3;
            }
        }

        p = buf_c;
        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width / 2; x++) {
                p[x * 2 + 1] = 128 + y / 2 + frame_count * 2;
                p[x * 2 + 0] = 64 + x + frame_count * 5;
            }
        }
    } break;
    case MPP_FMT_YUV422_YUYV : {
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width / 2; x++) {
                p[x * 4 + 0] = x * 2 + 0 + y + frame_count * 3;
                p[x * 4 + 2] = x * 2 + 1 + y + frame_count * 3;
                p[x * 4 + 1] = 128 + y / 2 + frame_count * 2;
                p[x * 4 + 3] = 64 + x + frame_count * 5;
            }
        }
    } break;
    case MPP_FMT_YUV422_YVYU : {
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width / 2; x++) {
                p[x * 4 + 0] = x * 2 + 0 + y + frame_count * 3;
                p[x * 4 + 2] = x * 2 + 1 + y + frame_count * 3;
                p[x * 4 + 3] = 128 + y / 2 + frame_count * 2;
                p[x * 4 + 1] = 64 + x + frame_count * 5;
            }
        }
    } break;
    case MPP_FMT_YUV422_UYVY : {
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width / 2; x++) {
                p[x * 4 + 1] = x * 2 + 0 + y + frame_count * 3;
                p[x * 4 + 3] = x * 2 + 1 + y + frame_count * 3;
                p[x * 4 + 0] = 128 + y / 2 + frame_count * 2;
                p[x * 4 + 2] = 64 + x + frame_count * 5;
            }
        }
    } break;
    case MPP_FMT_YUV422_VYUY : {
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width / 2; x++) {
                p[x * 4 + 1] = x * 2 + 0 + y + frame_count * 3;
                p[x * 4 + 3] = x * 2 + 1 + y + frame_count * 3;
                p[x * 4 + 2] = 128 + y / 2 + frame_count * 2;
                p[x * 4 + 0] = 64 + x + frame_count * 5;
            }
        }
    } break;
    case MPP_FMT_YUV400 : {
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = x + y + frame_count * 3;
            }
        }
    } break;
    case MPP_FMT_YUV444SP : {
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = x + y + frame_count * 3;
            }
        }

        p = buf + hor_stride * ver_stride;
        for (y = 0; y < height; y++, p += hor_stride * 2) {
            for (x = 0; x < width; x++) {
                p[x * 2 + 0] = 128 + y / 2 + frame_count * 2;
                p[x * 2 + 1] = 64 + x + frame_count * 5;
            }
        }
    } break;
    case MPP_FMT_YUV444P : {
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = x + y + frame_count * 3;
            }
        }
        p = buf + hor_stride * ver_stride;
        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = 128 + y / 2 + frame_count * 2;
            }
        }
        p = buf + hor_stride * ver_stride * 2;
        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = 64 + x + frame_count * 5;
            }
        }
    } break;
    case MPP_FMT_RGB565 :
    case MPP_FMT_BGR565 :
    case MPP_FMT_RGB555 :
    case MPP_FMT_BGR555 :
    case MPP_FMT_RGB444 :
    case MPP_FMT_BGR444 : {
        RK_U8 *p = buf_y;
        RK_U32 pix_w = 2;
        FillRgbFunc fill = fill_rgb_funcs[fmt - MPP_FRAME_FMT_RGB];

        if (util_check_stride_by_pixel(is_pixel_stride, width, hor_stride, pix_w)) {
            hor_stride *= pix_w;
            is_pixel_stride = 1;
        }

        if (util_check_8_pixel_aligned(not_8_pixel, hor_stride,
                                       8, pix_w, "16bit RGB")) {
            hor_stride = MPP_ALIGN(hor_stride, 16);
            not_8_pixel = 1;
        }

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0, i = 0; x < width; x++, i += pix_w) {
                RK_U32 R, G, B;

                get_rgb_color(&R, &G, &B, x, y, frame_count);
                fill(p + i, R, G, B, MPP_FRAME_FMT_IS_BE(fmt));
            }
        }
    } break;
    case MPP_FMT_RGB101010 :
    case MPP_FMT_BGR101010 :
    case MPP_FMT_ARGB8888 :
    case MPP_FMT_ABGR8888 :
    case MPP_FMT_BGRA8888 :
    case MPP_FMT_RGBA8888 : {
        RK_U8 *p = buf_y;
        RK_U32 pix_w = 4;
        FillRgbFunc fill = fill_rgb_funcs[fmt - MPP_FRAME_FMT_RGB];

        if (util_check_stride_by_pixel(is_pixel_stride, width, hor_stride, pix_w)) {
            hor_stride *= pix_w;
            is_pixel_stride = 1;
        }

        if (util_check_8_pixel_aligned(not_8_pixel, hor_stride,
                                       8, pix_w, "32bit RGB")) {
            hor_stride = MPP_ALIGN(hor_stride, 32);
            not_8_pixel = 1;
        }

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0, i = 0; x < width; x++, i += pix_w) {
                RK_U32 R, G, B;

                get_rgb_color(&R, &G, &B, x, y, frame_count);
                fill(p + i, R, G, B, MPP_FRAME_FMT_IS_BE(fmt));
            }
        }
    } break;
    case MPP_FMT_BGR888 :
    case MPP_FMT_RGB888 : {
        RK_U8 *p = buf_y;
        RK_U32 pix_w = 3;
        FillRgbFunc fill = fill_rgb_funcs[fmt - MPP_FRAME_FMT_RGB];

        if (util_check_stride_by_pixel(is_pixel_stride, width, hor_stride, pix_w)) {
            hor_stride *= pix_w;
            is_pixel_stride = 1;
        }

        if (util_check_8_pixel_aligned(not_8_pixel, hor_stride,
                                       8, pix_w, "24bit RGB")) {
            hor_stride = MPP_ALIGN_GEN(hor_stride, 24);
            not_8_pixel = 1;
        }

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0, i = 0; x < width; x++, i += pix_w) {
                RK_U32 R, G, B;

                get_rgb_color(&R, &G, &B, x, y, frame_count);
                fill(p + i, R, G, B, MPP_FRAME_FMT_IS_BE(fmt));
            }
        }
    } break;
    default : {
        mpp_err_f("filling function does not support type %d\n", fmt);
        ret = MPP_NOK;
    } break;
    }
    return ret;
}

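/*
 * Parse one line of an operation config file. The expected layout is
 * "name,index,cmd,value1,value2"; the leading name field is skipped.
 */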
RK_S32 parse_config_line(const char *str, OpsLine *info)
{
    RK_S32 cnt = sscanf(str, "%*[^,],%d,%[^,],%llu,%llu\n",
                        &info->index, info->cmd,
                        &info->value1, &info->value2);

    return cnt;
}

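/*
 * Copy the lower-cased extension of file_name (text after the last '.')
 * into extension; an empty string is stored when there is no dot.
 */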
static void get_extension(const char *file_name, char *extension)
{
    size_t length = strlen(file_name);
    size_t ext_len = 0;
    size_t i = 0;
    const char *p = file_name + length - 1;

    while (p >= file_name) {
        if (p[0] == '.') {
            for (i = 0; i < ext_len; i++)
                extension[i] = tolower(p[i + 1]);

            extension[i] = '\0';
            return ;
        }
        ext_len++;
        p--;
    }

    extension[0] = '\0';
}

typedef struct Ext2FrmFmt_t {
    const char *ext_name;
    MppFrameFormat format;
} Ext2FrmFmt;

Ext2FrmFmt map_ext_to_frm_fmt[] = {
    { "yuv420p",    MPP_FMT_YUV420P, },
    { "yuv420sp",   MPP_FMT_YUV420SP, },
    { "yuv422p",    MPP_FMT_YUV422P, },
    { "yuv422sp",   MPP_FMT_YUV422SP, },
    { "yuv422uyvy", MPP_FMT_YUV422_UYVY, },
    { "yuv422vyuy", MPP_FMT_YUV422_VYUY, },
    { "yuv422yuyv", MPP_FMT_YUV422_YUYV, },
    { "yuv422yvyu", MPP_FMT_YUV422_YVYU, },

    { "abgr8888",   MPP_FMT_ABGR8888, },
    { "argb8888",   MPP_FMT_ARGB8888, },
    { "bgr565",     MPP_FMT_BGR565, },
    { "bgr888",     MPP_FMT_BGR888, },
    { "bgra8888",   MPP_FMT_BGRA8888, },
    { "rgb565",     MPP_FMT_RGB565, },
    { "rgb888",     MPP_FMT_RGB888, },
    { "rgba8888",   MPP_FMT_RGBA8888, },

    { "fbc",        MPP_FMT_YUV420SP | MPP_FRAME_FBC_AFBC_V1, },
};

MPP_RET name_to_frame_format(const char *name, MppFrameFormat *fmt)
{
    RK_U32 i;
    MPP_RET ret = MPP_NOK;
    char ext[50];

    get_extension(name, ext);

    for (i = 0; i < MPP_ARRAY_ELEMS(map_ext_to_frm_fmt); i++) {
        Ext2FrmFmt *info = &map_ext_to_frm_fmt[i];

        if (!strcmp(ext, info->ext_name)) {
            *fmt = info->format;
            ret = MPP_OK;
        }
    }

    return ret;
}

typedef struct Ext2Coding_t {
    const char *ext_name;
    MppCodingType coding;
} Ext2Coding;

Ext2Coding map_ext_to_coding[] = {
    { "h264",  MPP_VIDEO_CodingAVC, },
    { "264",   MPP_VIDEO_CodingAVC, },
    { "avc",   MPP_VIDEO_CodingAVC, },

    { "h265",  MPP_VIDEO_CodingHEVC, },
    { "265",   MPP_VIDEO_CodingHEVC, },
    { "hevc",  MPP_VIDEO_CodingHEVC, },

    { "jpg",   MPP_VIDEO_CodingMJPEG, },
    { "jpeg",  MPP_VIDEO_CodingMJPEG, },
    { "mjpeg", MPP_VIDEO_CodingMJPEG, },
};

MPP_RET name_to_coding_type(const char *name, MppCodingType *coding)
{
    RK_U32 i;
    MPP_RET ret = MPP_NOK;
    char ext[50];

    get_extension(name, ext);

    for (i = 0; i < MPP_ARRAY_ELEMS(map_ext_to_coding); i++) {
        Ext2Coding *info = &map_ext_to_coding[i];

        if (!strcmp(ext, info->ext_name)) {
            *coding = info->coding;
            ret = MPP_OK;
        }
    }

    return ret;
}

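/*
 * Simple frame-rate counter. fps_calc_inc() is meant to be called once per
 * frame; under a spinlock it snapshots total and per-interval counts roughly
 * once per second and reports them through the registered callback.
 */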
typedef struct FpsCalcImpl_t {
    spinlock_t lock;
    FpsCalcCb callback;

    RK_S64 total_start;
    RK_S64 total_count;

    RK_S64 last_start;
    RK_S64 last_count;
} FpsCalcImpl;

MPP_RET fps_calc_init(FpsCalc *ctx)
{
    FpsCalcImpl *impl = mpp_calloc(FpsCalcImpl, 1);
    MPP_RET ret = MPP_NOK;

    if (impl) {
        mpp_spinlock_init(&impl->lock);
        ret = MPP_OK;
    }
    *ctx = impl;

    return ret;
}

MPP_RET fps_calc_deinit(FpsCalc ctx)
{
    MPP_FREE(ctx);

    return MPP_OK;
}

MPP_RET fps_calc_set_cb(FpsCalc ctx, FpsCalcCb cb)
{
    FpsCalcImpl *impl = (FpsCalcImpl *)ctx;

    if (impl)
        impl->callback = cb;

    return MPP_OK;
}

MPP_RET fps_calc_inc(FpsCalc ctx)
{
    FpsCalcImpl *impl = (FpsCalcImpl *)ctx;
    RK_S64 total_time = 0;
    RK_S64 total_count = 0;
    RK_S64 last_time = 0;
    RK_S64 last_count = 0;

    if (NULL == impl)
        return MPP_OK;

    mpp_spinlock_lock(&impl->lock);
    {
        RK_S64 now = mpp_time();
        if (!impl->total_count) {
            impl->total_start = now;
            impl->last_start = now;
        } else {
            RK_S64 elapsed = now - impl->last_start;

            // print on each second
            if (elapsed >= 1000000) {
                total_time = now - impl->total_start;
                total_count = impl->total_count;
                last_time = now - impl->last_start;
                last_count = impl->total_count - impl->last_count;

                impl->last_start = now;
                impl->last_count = impl->total_count;
            }
        }

        impl->total_count++;
    }
    mpp_spinlock_unlock(&impl->lock);

    if (impl->callback && total_time)
        impl->callback(total_time, total_count, last_time, last_count);

    return MPP_OK;
}

/*
 * @brief convert a string that describes a decimal, octal or hexadecimal
 *        number to a long integer.
 * @param[in]  nptr   - a string to convert to long integer
 * @param[out] number - long integer converted from a string
 */
MPP_RET str_to_frm_fmt(const char *nptr, long *number)
{
    MPP_RET ret = MPP_NOK;

    if (NULL == nptr || NULL == number) {
        mpp_err_f("invalid input nptr %p number %p\n", nptr, number);
        ret = MPP_ERR_NULL_PTR;
        goto RET;
    }

    if (nptr) {
        char *endptr = NULL; /* pointer to additional chars */
        long tmp = 0;

        /* reset errno to 0 before call */
        errno = 0;

        tmp = strtol(nptr, &endptr, 0);

        if (nptr == endptr)
            mpp_err("format: 0x%lx invalid (no digits found, 0 returned)", tmp);
        else if (errno == ERANGE && tmp == LONG_MIN)
            mpp_err("format: 0x%lx invalid (underflow occurred)", tmp);
        else if (errno == ERANGE && tmp == LONG_MAX)
            mpp_err("format: 0x%lx invalid (overflow occurred)", tmp);
        else if (errno == EINVAL) /* not in all c99 implementations - gcc OK */
            mpp_err("format: 0x%lx invalid (base contains unsupported value)", tmp);
        else if (errno != 0 && tmp == 0)
            mpp_err("format: 0x%lx invalid (unspecified error occurred)", tmp);
        else if (errno == 0 && nptr && *endptr != 0)
            mpp_err("format: 0x%lx invalid (additional characters remain)", tmp);
        else if (errno == 0 && nptr && !*endptr) {
            if ((tmp < (MPP_FMT_BUTT | MPP_FRAME_FMT_PROP_MASK)) && tmp >= 0) {
                *number = tmp;
                ret = MPP_OK;
            } else {
                mpp_err("format: 0x%lx invalid (not format value)", tmp);
            }
        }
    }

RET:
    return ret;
}