1 /*
2 * Copyright 2020 Rockchip Electronics Co. LTD
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <stdlib.h>
18
19 #include "test_comm_imgproc.h"
20 #include "test_comm_utils.h"
21
22 #define RK_CLIP3(l, h, a) ((a) < (l) ? (l) : ((a) > (h) ? (h) : (a)))
23
get_rgb_color(RK_U32 * R,RK_U32 * G,RK_U32 * B,RK_S32 x,RK_S32 y,RK_S32 frm_cnt)24 static void get_rgb_color(RK_U32 *R, RK_U32 *G, RK_U32 *B, RK_S32 x, RK_S32 y, RK_S32 frm_cnt) {
25 // moving color bar
26 RK_U8 Y = (0 + x + y + frm_cnt * 3);
27 RK_U8 U = (128 + (y / 2) + frm_cnt * 2);
28 RK_U8 V = (64 + (x / 2) + frm_cnt * 5);
29
30 RK_S32 _R = Y + ((360 * (V - 128)) >> 8);
31 RK_S32 _G = Y - (((88 * (U - 128) + 184 * (V - 128))) >> 8);
32 RK_S32 _B = Y + ((455 * (U - 128)) >> 8);
33
34 R[0] = RK_CLIP3(0, 255, _R);
35 G[0] = RK_CLIP3(0, 255, _G);
36 B[0] = RK_CLIP3(0, 255, _B);
37 }
38
fill_MPP_FMT_RGB565(RK_U8 * p,RK_U32 R,RK_U32 G,RK_U32 B,RK_U32 be)39 static void fill_MPP_FMT_RGB565(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be) {
40 // MPP_FMT_RGB565 = tmedia: rgb565be
41 // 16 bit pixel MSB --------> LSB
42 // (rrrr,rggg,gggb,bbbb)
43 // big endian | byte 0 | byte 1 |
44 // little endian | byte 1 | byte 0 |
45 RK_U16 val = (((R >> 3) & 0x1f) << 11) |
46 (((G >> 2) & 0x3f) << 5) |
47 (((B >> 3) & 0x1f) << 0);
48 if (be) {
49 p[0] = (val >> 8) & 0xff;
50 p[1] = (val >> 0) & 0xff;
51 } else {
52 p[0] = (val >> 0) & 0xff;
53 p[1] = (val >> 8) & 0xff;
54 }
55 }
56
fill_MPP_FMT_BGR565(RK_U8 * p,RK_U32 R,RK_U32 G,RK_U32 B,RK_U32 be)57 static void fill_MPP_FMT_BGR565(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be) {
58 // MPP_FMT_BGR565 = tmedia: bgr565be
59 // 16 bit pixel MSB --------> LSB
60 // (bbbb,bggg,gggr,rrrr)
61 // big endian | byte 0 | byte 1 |
62 // little endian | byte 1 | byte 0 |
63 RK_U16 val = (((R >> 3) & 0x1f) << 0) |
64 (((G >> 2) & 0x3f) << 5) |
65 (((B >> 3) & 0x1f) << 11);
66 if (be) {
67 p[0] = (val >> 8) & 0xff;
68 p[1] = (val >> 0) & 0xff;
69 } else {
70 p[0] = (val >> 0) & 0xff;
71 p[1] = (val >> 8) & 0xff;
72 }
73 }
74
fill_MPP_FMT_RGB555(RK_U8 * p,RK_U32 R,RK_U32 G,RK_U32 B,RK_U32 be)75 static void fill_MPP_FMT_RGB555(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be) {
76 // MPP_FMT_RGB555 = tmedia: rgb555be
77 // 16 bit pixel MSB --------> LSB
78 // (0rrr,rrgg,gggb,bbbb)
79 // big endian | byte 0 | byte 1 |
80 // little endian | byte 1 | byte 0 |
81 RK_U16 val = (((R >> 3) & 0x1f) << 10) |
82 (((G >> 3) & 0x1f) << 5) |
83 (((B >> 3) & 0x1f) << 0);
84 if (be) {
85 p[0] = (val >> 8) & 0xff;
86 p[1] = (val >> 0) & 0xff;
87 } else {
88 p[0] = (val >> 0) & 0xff;
89 p[1] = (val >> 8) & 0xff;
90 }
91 }
92
fill_MPP_FMT_BGR555(RK_U8 * p,RK_U32 R,RK_U32 G,RK_U32 B,RK_U32 be)93 static void fill_MPP_FMT_BGR555(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be) {
94 // MPP_FMT_BGR555 = tmedia: bgr555be
95 // 16 bit pixel MSB --------> LSB
96 // (0bbb,bbgg,gggr,rrrr)
97 // big endian | byte 0 | byte 1 |
98 // little endian | byte 1 | byte 0 |
99 RK_U16 val = (((R >> 3) & 0x1f) << 0) |
100 (((G >> 3) & 0x1f) << 5) |
101 (((B >> 3) & 0x1f) << 10);
102 if (be) {
103 p[0] = (val >> 8) & 0xff;
104 p[1] = (val >> 0) & 0xff;
105 } else {
106 p[0] = (val >> 0) & 0xff;
107 p[1] = (val >> 8) & 0xff;
108 }
109 }
110
fill_MPP_FMT_RGB444(RK_U8 * p,RK_U32 R,RK_U32 G,RK_U32 B,RK_U32 be)111 static void fill_MPP_FMT_RGB444(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be) {
112 // MPP_FMT_RGB444 = tmedia: rgb444be
113 // 16 bit pixel MSB --------> LSB
114 // (0000,rrrr,gggg,bbbb)
115 // big endian | byte 0 | byte 1 |
116 // little endian | byte 1 | byte 0 |
117 RK_U16 val = (((R >> 4) & 0xf) << 8) |
118 (((G >> 4) & 0xf) << 4) |
119 (((B >> 4) & 0xf) << 0);
120 if (be) {
121 p[0] = (val >> 8) & 0xff;
122 p[1] = (val >> 0) & 0xff;
123 } else {
124 p[0] = (val >> 0) & 0xff;
125 p[1] = (val >> 8) & 0xff;
126 }
127 }
128
fill_MPP_FMT_BGR444(RK_U8 * p,RK_U32 R,RK_U32 G,RK_U32 B,RK_U32 be)129 static void fill_MPP_FMT_BGR444(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be) {
130 // MPP_FMT_BGR444 = tmedia: bgr444be
131 // 16 bit pixel MSB --------> LSB
132 // (0000,bbbb,gggg,rrrr)
133 // big endian | byte 0 | byte 1 |
134 // little endian | byte 1 | byte 0 |
135 RK_U16 val = (((R >> 4) & 0xf) << 0) |
136 (((G >> 4) & 0xf) << 4) |
137 (((B >> 4) & 0xf) << 8);
138 if (be) {
139 p[0] = (val >> 8) & 0xff;
140 p[1] = (val >> 0) & 0xff;
141 } else {
142 p[0] = (val >> 0) & 0xff;
143 p[1] = (val >> 8) & 0xff;
144 }
145 }
146
fill_MPP_FMT_RGB888(RK_U8 * p,RK_U32 R,RK_U32 G,RK_U32 B,RK_U32 be)147 static void fill_MPP_FMT_RGB888(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be) {
148 // MPP_FMT_RGB888
149 // 24 bit pixel MSB --------> LSB
150 // (rrrr,rrrr,gggg,gggg,bbbb,bbbb)
151 // big endian | byte 0 | byte 1 | byte 2 |
152 // little endian | byte 2 | byte 1 | byte 0 |
153 if (be) {
154 p[0] = R;
155 p[1] = G;
156 p[2] = B;
157 } else {
158 p[0] = B;
159 p[1] = G;
160 p[2] = R;
161 }
162 }
163
fill_MPP_FMT_BGR888(RK_U8 * p,RK_U32 R,RK_U32 G,RK_U32 B,RK_U32 be)164 static void fill_MPP_FMT_BGR888(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be) {
165 // MPP_FMT_BGR888
166 // 24 bit pixel MSB --------> LSB
167 // (bbbb,bbbb,gggg,gggg,rrrr,rrrr)
168 // big endian | byte 0 | byte 1 | byte 2 |
169 // little endian | byte 2 | byte 1 | byte 0 |
170 if (be) {
171 p[0] = B;
172 p[1] = G;
173 p[2] = R;
174 } else {
175 p[0] = R;
176 p[1] = G;
177 p[2] = B;
178 }
179 }
180
fill_MPP_FMT_RGB101010(RK_U8 * p,RK_U32 R,RK_U32 G,RK_U32 B,RK_U32 be)181 static void fill_MPP_FMT_RGB101010(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be) {
182 // MPP_FMT_RGB101010
183 // 32 bit pixel MSB --------> LSB
184 // (00rr,rrrr,rrrr,gggg,gggg,ggbb,bbbb,bbbb)
185 // big endian | byte 0 | byte 1 | byte 2 | byte 3 |
186 // little endian | byte 3 | byte 2 | byte 1 | byte 0 |
187 RK_U32 val = (((R * 4) & 0x3ff) << 20) |
188 (((G * 4) & 0x3ff) << 10) |
189 (((B * 4) & 0x3ff) << 0);
190 if (be) {
191 p[0] = (val >> 24) & 0xff;
192 p[1] = (val >> 16) & 0xff;
193 p[2] = (val >> 8) & 0xff;
194 p[3] = (val >> 0) & 0xff;
195 } else {
196 p[0] = (val >> 0) & 0xff;
197 p[1] = (val >> 8) & 0xff;
198 p[2] = (val >> 16) & 0xff;
199 p[3] = (val >> 24) & 0xff;
200 }
201 }
202
fill_MPP_FMT_BGR101010(RK_U8 * p,RK_U32 R,RK_U32 G,RK_U32 B,RK_U32 be)203 static void fill_MPP_FMT_BGR101010(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be) {
204 // MPP_FMT_BGR101010
205 // 32 bit pixel MSB --------> LSB
206 // (00bb,bbbb,bbbb,gggg,gggg,ggrr,rrrr,rrrr)
207 // big endian | byte 0 | byte 1 | byte 2 | byte 3 |
208 // little endian | byte 3 | byte 2 | byte 1 | byte 0 |
209 RK_U32 val = (((R * 4) & 0x3ff) << 0) |
210 (((G * 4) & 0x3ff) << 10) |
211 (((B * 4) & 0x3ff) << 20);
212 if (be) {
213 p[0] = (val >> 24) & 0xff;
214 p[1] = (val >> 16) & 0xff;
215 p[2] = (val >> 8) & 0xff;
216 p[3] = (val >> 0) & 0xff;
217 } else {
218 p[0] = (val >> 0) & 0xff;
219 p[1] = (val >> 8) & 0xff;
220 p[2] = (val >> 16) & 0xff;
221 p[3] = (val >> 24) & 0xff;
222 }
223 }
224
fill_MPP_FMT_ARGB8888(RK_U8 * p,RK_U32 R,RK_U32 G,RK_U32 B,RK_U32 be)225 static void fill_MPP_FMT_ARGB8888(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be) {
226 // MPP_FMT_ARGB8888
227 // 32 bit pixel MSB --------> LSB
228 // (XXXX,XXXX,rrrr,rrrr,gggg,gggg,bbbb,bbbb)
229 // big endian | byte 0 | byte 1 | byte 2 | byte 3 |
230 // little endian | byte 3 | byte 2 | byte 1 | byte 0 |
231 if (be) {
232 p[0] = 0xff;
233 p[1] = R;
234 p[2] = G;
235 p[3] = B;
236 } else {
237 p[0] = B;
238 p[1] = G;
239 p[2] = R;
240 p[3] = 0xff;
241 }
242 }
243
fill_MPP_FMT_ABGR8888(RK_U8 * p,RK_U32 R,RK_U32 G,RK_U32 B,RK_U32 be)244 static void fill_MPP_FMT_ABGR8888(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be) {
245 // MPP_FMT_ABGR8888
246 // 32 bit pixel MSB --------> LSB
247 // (XXXX,XXXX,bbbb,bbbb,gggg,gggg,rrrr,rrrr)
248 // big endian | byte 0 | byte 1 | byte 2 | byte 3 |
249 // little endian | byte 3 | byte 2 | byte 1 | byte 0 |
250 if (be) {
251 p[0] = 0xff;
252 p[1] = B;
253 p[2] = G;
254 p[3] = R;
255 } else {
256 p[0] = R;
257 p[1] = G;
258 p[2] = B;
259 p[3] = 0xff;
260 }
261 }
262
fill_MPP_FMT_BGRA8888(RK_U8 * p,RK_U32 R,RK_U32 G,RK_U32 B,RK_U32 be)263 static void fill_MPP_FMT_BGRA8888(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be) {
264 // MPP_FMT_BGRA8888
265 // 32 bit pixel MSB --------> LSB
266 // (bbbb,bbbb,gggg,gggg,rrrr,rrrr,XXXX,XXXX)
267 // big endian | byte 0 | byte 1 | byte 2 | byte 3 |
268 // little endian | byte 3 | byte 2 | byte 1 | byte 0 |
269 if (be) {
270 p[0] = B;
271 p[1] = G;
272 p[2] = R;
273 p[3] = 0xff;
274 } else {
275 p[0] = 0xff;
276 p[1] = R;
277 p[2] = G;
278 p[3] = B;
279 }
280 }
281
fill_MPP_FMT_RGBA8888(RK_U8 * p,RK_U32 R,RK_U32 G,RK_U32 B,RK_U32 be)282 static void fill_MPP_FMT_RGBA8888(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be) {
283 // MPP_FMT_RGBA8888
284 // 32 bit pixel MSB --------> LSB
285 // (rrrr,rrrr,gggg,gggg,bbbb,bbbb,XXXX,XXXX)
286 // big endian | byte 0 | byte 1 | byte 2 | byte 3 |
287 // little endian | byte 3 | byte 2 | byte 1 | byte 0 |
288 if (be) {
289 p[0] = R;
290 p[1] = G;
291 p[2] = B;
292 p[3] = 0xff;
293 } else {
294 p[0] = 0xff;
295 p[1] = B;
296 p[2] = G;
297 p[3] = R;
298 }
299 }
300
fill_MPP_FMT_ARGB1555(RK_U8 * p,RK_U32 R,RK_U32 G,RK_U32 B,RK_U32 be)301 static void fill_MPP_FMT_ARGB1555(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be) {
302 RK_U8 r1, g1, b1, a1;
303 RK_U16 *u16p = reinterpret_cast<RK_U16 *>(p);
304
305 a1 = 1;
306 r1 = g1 = b1 = 0;
307 r1 = R >> 3;
308 g1 = G >> 3;
309 b1 = B >> 3;
310
311 if (be) {
312 *u16p = a1 | (r1 << 1) | (g1 << 6) | (b1 << 11);
313 } else {
314 *u16p = (a1 << 15) + (r1 << 10) | (g1 << 5) | b1;
315 }
316 }
317
fill_MPP_FMT_ABGR1555(RK_U8 * p,RK_U32 R,RK_U32 G,RK_U32 B,RK_U32 be)318 static void fill_MPP_FMT_ABGR1555(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be) {
319 RK_U8 r1, g1, b1, a1;
320 RK_U16 *u16p = reinterpret_cast<RK_U16 *>(p);
321
322 a1 = 1;
323 r1 = g1 = b1 = 0;
324 r1 = R >> 3;
325 g1 = G >> 3;
326 b1 = B >> 3;
327
328 if (be) {
329 *u16p = a1 | (b1 << 1) | (g1 << 6) | (r1 << 11);
330 } else {
331 *u16p = (a1 << 15) + (b1 << 10) | (g1 << 5) | r1;
332 }
333 }
334
fill_MPP_FMT_ARGB4444(RK_U8 * p,RK_U32 R,RK_U32 G,RK_U32 B,RK_U32 be)335 static void fill_MPP_FMT_ARGB4444(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be) {
336 RK_U16 val = (0xf << 12) |
337 (((R >> 4) & 0xf) << 8) |
338 (((G >> 4) & 0xf) << 4) |
339 (((B >> 4) & 0xf) << 0);
340 if (be) {
341 p[0] = (val >> 8) & 0xff;
342 p[1] = (val >> 0) & 0xff;
343 } else {
344 p[0] = (val >> 0) & 0xff;
345 p[1] = (val >> 8) & 0xff;
346 }
347 }
348
fill_MPP_FMT_ABGR4444(RK_U8 * p,RK_U32 R,RK_U32 G,RK_U32 B,RK_U32 be)349 static void fill_MPP_FMT_ABGR4444(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be) {
350 RK_U16 val = (((R >> 4) & 0xf) << 0) |
351 (((G >> 4) & 0xf) << 4) |
352 (((B >> 4) & 0xf) << 8) |
353 (0xf << 12);
354 if (be) {
355 p[0] = (val >> 8) & 0xff;
356 p[1] = (val >> 0) & 0xff;
357 } else {
358 p[0] = (val >> 0) & 0xff;
359 p[1] = (val >> 8) & 0xff;
360 }
361 }
362
fill_MPP_FMT_BGRA4444(RK_U8 * p,RK_U32 R,RK_U32 G,RK_U32 B,RK_U32 be)363 static void fill_MPP_FMT_BGRA4444(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be) {
364 RK_U8 r1, g1, b1, a1;
365 RK_U16 *u16p = reinterpret_cast<RK_U16 *>(p);
366
367 a1 = 0xf;
368 r1 = g1 = b1 = 0;
369 r1 = R >> 4;
370 g1 = G >> 4;
371 b1 = B >> 4;
372
373 if (be) {
374 *u16p = b1 | (g1 << 4) | (r1 << 8) | (a1 << 12);
375 } else {
376 *u16p = (b1 << 12) + (g1 << 8) | (r1 << 4) | a1;
377 }
378 }
379
fill_MPP_FMT_RGBA5551(RK_U8 * p,RK_U32 R,RK_U32 G,RK_U32 B,RK_U32 be)380 static void fill_MPP_FMT_RGBA5551(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be) {
381 RK_U8 r1, g1, b1, a1;
382 RK_U16 *u16p = reinterpret_cast<RK_U16 *>(p);
383
384 a1 = 1;
385 r1 = g1 = b1 = 0;
386 r1 = R >> 3;
387 g1 = G >> 3;
388 b1 = B >> 3;
389
390 if (be) {
391 *u16p = r1 | (g1 << 5) | (b1 << 10)| (a1 << 15);
392 } else {
393 *u16p = (r1 << 11) | (g1 << 6) | (b1 << 1) | a1;
394 }
395 }
396
fill_MPP_FMT_BGRA5551(RK_U8 * p,RK_U32 R,RK_U32 G,RK_U32 B,RK_U32 be)397 static void fill_MPP_FMT_BGRA5551(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be) {
398 RK_U8 r1, g1, b1, a1;
399 RK_U16 *u16p = reinterpret_cast<RK_U16 *>(p);
400
401 a1 = 1;
402 r1 = g1 = b1 = 0;
403 r1 = R >> 3;
404 g1 = G >> 3;
405 b1 = B >> 3;
406
407 if (be) {
408 *u16p = b1 | (g1 << 5) | (r1 << 10) | (a1 << 15);
409 } else {
410 *u16p = (b1 << 11) + (g1 << 6) | (r1 << 1) | a1;
411 }
412 }
413
414
// Signature shared by all RGB pixel-fill helpers above: write one pixel at p
// from 8-bit R/G/B components; 'be' selects byte order.
typedef void (*FillRgbFunc)(RK_U8 *p, RK_U32 R, RK_U32 G, RK_U32 B, RK_U32 be);

// Fill-function lookup table indexed by (fmt - RK_VIDEO_FMT_RGB) — see the
// RGB cases in TEST_COMM_FillImage below. The order of entries is
// load-bearing: it must match the pixel-format enum order exactly.
// NOTE(review): the two RK_NULL slots are formats with no fill support here —
// confirm which enum values they correspond to before reordering anything.
FillRgbFunc fill_rgb_funcs[] = {
    fill_MPP_FMT_RGB565,
    fill_MPP_FMT_BGR565,
    fill_MPP_FMT_RGB555,
    fill_MPP_FMT_BGR555,
    fill_MPP_FMT_RGB444,
    fill_MPP_FMT_BGR444,
    fill_MPP_FMT_RGB888,
    fill_MPP_FMT_BGR888,
    fill_MPP_FMT_RGB101010,
    fill_MPP_FMT_BGR101010,
    fill_MPP_FMT_ARGB1555,
    fill_MPP_FMT_ABGR1555,
    fill_MPP_FMT_ARGB4444,
    fill_MPP_FMT_ABGR4444,
    RK_NULL,            // unsupported format slot
    RK_NULL,            // unsupported format slot
    fill_MPP_FMT_ARGB8888,
    fill_MPP_FMT_ABGR8888,
    fill_MPP_FMT_BGRA8888,
    fill_MPP_FMT_RGBA8888,
    fill_MPP_FMT_RGBA5551,
    fill_MPP_FMT_BGRA5551,
    fill_MPP_FMT_BGRA4444
};
442
util_check_stride_by_pixel(RK_S32 workaround,RK_S32 width,RK_S32 hor_stride,RK_S32 pixel_size)443 static RK_S32 util_check_stride_by_pixel(RK_S32 workaround, RK_S32 width,
444 RK_S32 hor_stride, RK_S32 pixel_size) {
445 if (!workaround && hor_stride < width * pixel_size) {
446 RK_LOGW("warning: stride by bytes %d is smarller than width %d mutiple by pixel size %d",
447 hor_stride, width, pixel_size);
448 RK_LOGW("multiple stride %d by pixel size %d and set new byte stride to %d",
449 hor_stride, pixel_size, hor_stride * pixel_size);
450 workaround = 1;
451 }
452
453 return workaround;
454 }
455
util_check_8_pixel_aligned(RK_S32 workaround,RK_S32 hor_stride,RK_S32 pixel_aign,RK_S32 pixel_size,const char * fmt_name)456 static RK_S32 util_check_8_pixel_aligned(RK_S32 workaround, RK_S32 hor_stride,
457 RK_S32 pixel_aign, RK_S32 pixel_size,
458 const char *fmt_name) {
459 if (!workaround && hor_stride != RK_ALIGN(hor_stride, pixel_aign * pixel_size)) {
460 RK_LOGW("warning: vepu only support 8 aligned horizontal stride in pixel for %s with pixel size %d",
461 fmt_name, pixel_size);
462 RK_LOGW("set byte stride to %d to match the requirement",
463 RK_ALIGN(hor_stride, pixel_aign * pixel_size));
464 workaround = 1;
465 }
466
467 return workaround;
468 }
469
/*
 * Fill 'buf' with a deterministic moving test pattern for the given pixel
 * format. Luma/R ramps with x + y, chroma ramps with x/2 and y/2, and all
 * planes advance with 'frame_count' so successive frames visibly move.
 *
 * buf         - destination frame buffer (large enough for fmt at the given
 *               strides; chroma planes are addressed at hor_stride * ver_stride)
 * width/height    - visible picture size in pixels
 * hor_stride      - luma row stride in BYTES (may be adjusted internally for
 *                   RGB formats, see the workaround helpers)
 * ver_stride      - plane height in rows used for chroma plane offsets
 * fmt             - RK_FMT_* pixel format
 * frame_count     - frame index used to animate the pattern
 *
 * Returns RK_SUCCESS, or -1 for unsupported formats.
 *
 * NOTE(review): is_pixel_stride / not_8_pixel are function-level statics, so
 * a stride workaround detected for one stream latches for every later call in
 * the process, regardless of format or caller — confirm this is intended.
 */
RK_S32 TEST_COMM_FillImage(RK_U8 *buf, RK_U32 width, RK_U32 height,
                           RK_U32 hor_stride, RK_U32 ver_stride, PIXEL_FORMAT_E fmt,
                           RK_U32 frame_count) {
    RK_S32 ret = RK_SUCCESS;
    RK_U8 *buf_y = buf;
    // Chroma plane(s) start right after the luma plane.
    RK_U8 *buf_c = buf + hor_stride * ver_stride;
    RK_U32 x, y, i;
    static RK_S32 is_pixel_stride = 0;  // latched: stride was given in pixels
    static RK_S32 not_8_pixel = 0;      // latched: stride was not 8-pixel aligned

    switch (fmt) {
    case RK_FMT_YUV420SP : {
        // Semi-planar 4:2:0: full-size Y plane, then interleaved U/V at half
        // resolution in both directions.
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = x + y + frame_count * 3;
            }
        }

        p = buf + hor_stride * ver_stride;
        for (y = 0; y < height / 2; y++, p += hor_stride) {
            for (x = 0; x < width / 2; x++) {
                p[x * 2 + 0] = 128 + y + frame_count * 2;   // U
                p[x * 2 + 1] = 64 + x + frame_count * 5;    // V
            }
        }
    } break;
    case RK_FMT_YUV422SP : {
        // Semi-planar 4:2:2: chroma is subsampled horizontally only, so the
        // interleaved U/V plane has a full 'height' rows.
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = x + y + frame_count * 3;
            }
        }

        p = buf + hor_stride * ver_stride;
        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width / 2; x++) {
                p[x * 2 + 0] = 128 + y / 2 + frame_count * 2;   // U
                p[x * 2 + 1] = 64 + x + frame_count * 5;        // V
            }
        }
    } break;
    case RK_FMT_YUV420P : {
        // Planar 4:2:0: separate U and V planes, each with half stride; the
        // V plane follows the quarter-size U plane.
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = x + y + frame_count * 3;
            }
        }

        p = buf_c;
        for (y = 0; y < height / 2; y++, p += hor_stride / 2) {
            for (x = 0; x < width / 2; x++) {
                p[x] = 128 + y + frame_count * 2;   // U plane
            }
        }

        p = buf_c + hor_stride * ver_stride / 4;
        for (y = 0; y < height / 2; y++, p += hor_stride / 2) {
            for (x = 0; x < width / 2; x++) {
                p[x] = 64 + x + frame_count * 5;    // V plane
            }
        }
    } break;
    case RK_FMT_YUV420SP_VU : {
        // NV21: like YUV420SP but with V stored before U in each chroma pair.
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = x + y + frame_count * 3;
            }
        }

        p = buf_c;
        for (y = 0; y < height / 2; y++, p += hor_stride) {
            for (x = 0; x < width / 2; x++) {
                p[x * 2 + 1] = 128 + y + frame_count * 2;   // U
                p[x * 2 + 0] = 64 + x + frame_count * 5;    // V
            }
        }
    } break;
    case RK_FMT_YUV422P : {
        // Planar 4:2:2: separate half-stride U and V planes, full height.
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = x + y + frame_count * 3;
            }
        }

        p = buf_c;
        for (y = 0; y < height; y++, p += hor_stride / 2) {
            for (x = 0; x < width / 2; x++) {
                p[x] = 128 + y / 2 + frame_count * 2;   // U plane
            }
        }

        p = buf_c + hor_stride * ver_stride / 2;
        for (y = 0; y < height; y++, p += hor_stride / 2) {
            for (x = 0; x < width / 2; x++) {
                p[x] = 64 + x + frame_count * 5;        // V plane
            }
        }
    } break;
    case RK_FMT_YUV422SP_VU : {
        // Semi-planar 4:2:2 with V before U in each chroma pair.
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = x + y + frame_count * 3;
            }
        }

        p = buf_c;
        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width / 2; x++) {
                p[x * 2 + 1] = 128 + y / 2 + frame_count * 2;   // U
                p[x * 2 + 0] = 64 + x + frame_count * 5;        // V
            }
        }
    } break;
    case RK_FMT_YUV422_YUYV : {
        // Packed 4:2:2, byte order Y0 U Y1 V; row step is hor_stride * 2
        // bytes because each pixel pair occupies 4 bytes.
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride * 2) {
            for (x = 0; x < width / 2; x++) {
                p[x * 4 + 0] = x * 2 + 0 + y + frame_count * 3;
                p[x * 4 + 2] = x * 2 + 1 + y + frame_count * 3;
                p[x * 4 + 1] = 128 + y / 2 + frame_count * 2;   // U
                p[x * 4 + 3] = 64 + x + frame_count * 5;        // V
            }
        }
    } break;
    case RK_FMT_YUV422_YVYU : {
        // Packed 4:2:2, byte order Y0 V Y1 U.
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride * 2) {
            for (x = 0; x < width / 2; x++) {
                p[x * 4 + 0] = x * 2 + 0 + y + frame_count * 3;
                p[x * 4 + 2] = x * 2 + 1 + y + frame_count * 3;
                p[x * 4 + 3] = 128 + y / 2 + frame_count * 2;   // U
                p[x * 4 + 1] = 64 + x + frame_count * 5;        // V
            }
        }
    } break;
    case RK_FMT_YUV422_UYVY : {
        // Packed 4:2:2, byte order U Y0 V Y1.
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride * 2) {
            for (x = 0; x < width / 2; x++) {
                p[x * 4 + 1] = x * 2 + 0 + y + frame_count * 3;
                p[x * 4 + 3] = x * 2 + 1 + y + frame_count * 3;
                p[x * 4 + 0] = 128 + y / 2 + frame_count * 2;   // U
                p[x * 4 + 2] = 64 + x + frame_count * 5;        // V
            }
        }
    } break;
    case RK_FMT_YUV422_VYUY : {
        // Packed 4:2:2, byte order V Y0 U Y1.
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride * 2) {
            for (x = 0; x < width / 2; x++) {
                p[x * 4 + 1] = x * 2 + 0 + y + frame_count * 3;
                p[x * 4 + 3] = x * 2 + 1 + y + frame_count * 3;
                p[x * 4 + 2] = 128 + y / 2 + frame_count * 2;   // U
                p[x * 4 + 0] = 64 + x + frame_count * 5;        // V
            }
        }
    } break;
    case RK_FMT_YUV400SP : {
        // Grayscale: luma plane only.
        RK_U8 *p = buf_y;

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0; x < width; x++) {
                p[x] = x + y + frame_count * 3;
            }
        }
    } break;
    case RK_FMT_RGB565 :
    case RK_FMT_BGR565 :
    case RK_FMT_RGB555 :
    case RK_FMT_BGR555 :
    case RK_FMT_RGB444 :
    case RK_FMT_BGR444 :
    case RK_FMT_ARGB1555 :
    case RK_FMT_ABGR1555 :
    case RK_FMT_RGBA5551 :
    case RK_FMT_BGRA5551 :
    case RK_FMT_ARGB4444 :
    case RK_FMT_ABGR4444 :
    case RK_FMT_BGRA4444 : {
        // 16-bit RGB family: pick the per-format packer from the table
        // (indexed relative to RK_VIDEO_FMT_RGB) and fill pixel by pixel.
        RK_U8 *p = buf_y;
        RK_U32 pix_w = 2;   // bytes per pixel
        FillRgbFunc fill = fill_rgb_funcs[fmt - RK_VIDEO_FMT_RGB];

        // Workaround: caller gave stride in pixels -> convert to bytes.
        if (util_check_stride_by_pixel(is_pixel_stride, width, hor_stride, pix_w)) {
            hor_stride *= pix_w;
            is_pixel_stride = 1;
        }

        // Workaround: encoder needs 8-pixel-aligned stride (16 bytes here).
        if (util_check_8_pixel_aligned(not_8_pixel, hor_stride,
                                       8, pix_w, "16bit RGB")) {
            hor_stride = RK_ALIGN(hor_stride, 16);
            not_8_pixel = 1;
        }

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0, i = 0; x < width; x++, i += pix_w) {
                RK_U32 R, G, B;

                get_rgb_color(&R, &G, &B, x, y, frame_count);
                fill(p + i, R, G, B, 1);   // be=1: big-endian byte order
            }
        }
    } break;
    case RK_FMT_RGB101010 :
    case RK_FMT_BGR101010 :
    case RK_FMT_ARGB8888 :
    case RK_FMT_ABGR8888 :
    case RK_FMT_BGRA8888 :
    case RK_FMT_RGBA8888 : {
        // 32-bit RGB family; same flow as the 16-bit case with pix_w = 4.
        RK_U8 *p = buf_y;
        RK_U32 pix_w = 4;   // bytes per pixel
        FillRgbFunc fill = fill_rgb_funcs[fmt - RK_VIDEO_FMT_RGB];

        if (util_check_stride_by_pixel(is_pixel_stride, width, hor_stride, pix_w)) {
            hor_stride *= pix_w;
            is_pixel_stride = 1;
        }

        if (util_check_8_pixel_aligned(not_8_pixel, hor_stride,
                                       8, pix_w, "32bit RGB")) {
            hor_stride = RK_ALIGN(hor_stride, 32);
            not_8_pixel = 1;
        }

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0, i = 0; x < width; x++, i += pix_w) {
                RK_U32 R, G, B;

                get_rgb_color(&R, &G, &B, x, y, frame_count);
                fill(p + i, R, G, B, 1);   // be=1: big-endian byte order
            }
        }
    } break;
    case RK_FMT_BGR888 :
    case RK_FMT_RGB888 : {
        // 24-bit RGB family; same flow with pix_w = 3.
        RK_U8 *p = buf_y;
        RK_U32 pix_w = 3;   // bytes per pixel
        FillRgbFunc fill = fill_rgb_funcs[fmt - RK_VIDEO_FMT_RGB];

        if (util_check_stride_by_pixel(is_pixel_stride, width, hor_stride, pix_w)) {
            hor_stride *= pix_w;
            is_pixel_stride = 1;
        }

        if (util_check_8_pixel_aligned(not_8_pixel, hor_stride,
                                       8, pix_w, "24bit RGB")) {
            hor_stride = RK_ALIGN(hor_stride, 24);
            not_8_pixel = 1;
        }

        for (y = 0; y < height; y++, p += hor_stride) {
            for (x = 0, i = 0; x < width; x++, i += pix_w) {
                RK_U32 R, G, B;

                get_rgb_color(&R, &G, &B, x, y, frame_count);
                fill(p + i, R, G, B, 1);   // be=1: big-endian byte order
            }
        }
    } break;
    default : {
        RK_LOGE("filling function do not support type %d\n", fmt);
        ret = -1;
    } break;
    }
    return ret;
}
752
TEST_COMM_CompareImageFuzzy(RK_U8 * pu8Src,RK_U8 * pu8Dst,RK_U32 u32Stride,RK_U32 u32Width,RK_U32 u32Height,RK_DOUBLE dThreshold)753 RK_BOOL TEST_COMM_CompareImageFuzzy(
754 RK_U8 *pu8Src, RK_U8 *pu8Dst, RK_U32 u32Stride,
755 RK_U32 u32Width, RK_U32 u32Height, RK_DOUBLE dThreshold) {
756 RK_U8 *pu8TmpSrc = pu8Src;
757 RK_U8 *pu8TmpDst = pu8Dst;
758 RK_U32 u32LineDiffCnt = 0;
759 RK_U32 u32MaxLineDiffCnt = 0;
760 RK_U32 u32TotalDiffCnt = 0;
761 RK_DOUBLE dAvgDiffRate = 0.0f;
762 RK_U32 u32EffectStride = (u32Stride / u32Width) * u32Width;
763
764 for (RK_U32 i = 0; i < u32Height; i++) {
765 for (RK_U32 j = 0; j < u32EffectStride; j++) {
766 if (abs(pu8TmpSrc[i * u32Stride +j] - pu8TmpDst[i * u32Stride +j]) > 0x20) {
767 u32LineDiffCnt++;
768 }
769 }
770 if (u32LineDiffCnt > u32MaxLineDiffCnt) {
771 u32MaxLineDiffCnt = u32LineDiffCnt;
772 }
773 u32TotalDiffCnt += u32LineDiffCnt;
774 u32LineDiffCnt = 0;
775 }
776
777 dAvgDiffRate = (RK_DOUBLE)u32TotalDiffCnt / u32EffectStride / u32Height;
778
779 RK_LOGI("max line diff(%d), stride(%d), diff rate act(%f) VS exp(%f)",
780 u32MaxLineDiffCnt, u32EffectStride,
781 (RK_DOUBLE)u32MaxLineDiffCnt / u32EffectStride, dThreshold * 2);
782 RK_LOGI("total pixel diff(%d), pixel number(%d), diff rate act(%f) VS exp(%f)",
783 u32TotalDiffCnt, u32EffectStride * u32Height, dAvgDiffRate, dThreshold / 2);
784
785 if (dThreshold * 2 < (RK_DOUBLE)u32MaxLineDiffCnt / u32EffectStride
786 && dThreshold / 2 < dAvgDiffRate) {
787 return RK_TRUE;
788 } else {
789 return RK_FALSE;
790 }
791 }
792