/*
 * Copyright 2015 Rockchip Electronics Co. LTD
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#define MODULE_TAG "vepu_common"

#include <string.h>

#include "mpp_mem.h"
#include "mpp_debug.h"
#include "mpp_common.h"
#include "mpp_frame.h"

#include "vepu_common.h"
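/*
 * Lookup table from the MppFrameFormat YUV enums (indexed by
 * format - MPP_FRAME_FMT_YUV) to the VEPU input format configuration.
 * Entries marked VEPU_FMT_BUTT are treated as unsupported by
 * get_vepu_fmt() below.
 */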
static VepuFormatCfg vepu_yuv_cfg[MPP_FMT_YUV_BUTT] = {
    //MPP_FMT_YUV420SP
    { .format = VEPU_FMT_YUV420SEMIPLANAR, .r_mask = 0, .g_mask = 0, .b_mask = 0, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_YUV420SP_10BIT
    { .format = VEPU_FMT_BUTT, .r_mask = 0, .g_mask = 0, .b_mask = 0, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_YUV422SP
    { .format = VEPU_FMT_BUTT, .r_mask = 0, .g_mask = 0, .b_mask = 0, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_YUV422SP_10BIT
    { .format = VEPU_FMT_BUTT, .r_mask = 0, .g_mask = 0, .b_mask = 0, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_YUV420P
    { .format = VEPU_FMT_YUV420PLANAR, .r_mask = 0, .g_mask = 0, .b_mask = 0, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_YUV420SP_VU
    { .format = VEPU_FMT_BUTT, .r_mask = 0, .g_mask = 0, .b_mask = 0, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_YUV422P
    { .format = VEPU_FMT_BUTT, .r_mask = 0, .g_mask = 0, .b_mask = 0, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_YUV422SP_VU
    { .format = VEPU_FMT_BUTT, .r_mask = 0, .g_mask = 0, .b_mask = 0, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_YUV422_YUYV
    { .format = VEPU_FMT_YUYV422INTERLEAVED, .r_mask = 0, .g_mask = 0, .b_mask = 0, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_YUV422_YVYU
    { .format = VEPU_FMT_BUTT, .r_mask = 0, .g_mask = 0, .b_mask = 0, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_YUV422_UYVY
    { .format = VEPU_FMT_UYVY422INTERLEAVED, .r_mask = 0, .g_mask = 0, .b_mask = 0, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_YUV422_VYUY
    { .format = VEPU_FMT_BUTT, .r_mask = 0, .g_mask = 0, .b_mask = 0, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_YUV400
    { .format = VEPU_FMT_BUTT, .r_mask = 0, .g_mask = 0, .b_mask = 0, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_YUV440SP
    { .format = VEPU_FMT_BUTT, .r_mask = 0, .g_mask = 0, .b_mask = 0, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_YUV411SP
    { .format = VEPU_FMT_BUTT, .r_mask = 0, .g_mask = 0, .b_mask = 0, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_YUV444SP
    { .format = VEPU_FMT_BUTT, .r_mask = 0, .g_mask = 0, .b_mask = 0, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
};
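/*
 * RGB input configurations for the default (big-endian style) packed layouts.
 * The r/g/b masks appear to give the bit position of each channel's most
 * significant bit; the ff:/bin() notes record the matching ffmpeg name and
 * the in-memory bit layout.
 */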
static VepuFormatCfg vepu_rgb_cfg[MPP_FMT_RGB_BUTT - MPP_FRAME_FMT_RGB] = {
    //MPP_FMT_RGB565, ff: rgb565be, bin(rrrr,rggg,gggb,bbbb) mem MSB-->LSB(gggb,bbbb,rrrr,rggg)
    { .format = VEPU_FMT_RGB565, .r_mask = 15, .g_mask = 10, .b_mask = 4, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_BGR565, ff: bgr565be, bin(bbbb,bggg,gggr,rrrr) mem MSB-->LSB(gggr,rrrr,bbbb,bggg)
    { .format = VEPU_FMT_RGB565, .r_mask = 4, .g_mask = 10, .b_mask = 15, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_RGB555, ff: rgb555be, bin(0rrr,rrgg,gggb,bbbb) mem MSB-->LSB(gggb,bbbb,0rrr,rrgg)
    { .format = VEPU_FMT_RGB555, .r_mask = 14, .g_mask = 9, .b_mask = 4, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_BGR555, ff: bgr555be, bin(0bbb,bbgg,gggr,rrrr) mem MSB-->LSB(gggr,rrrr,0bbb,bbgg)
    { .format = VEPU_FMT_RGB555, .r_mask = 4, .g_mask = 9, .b_mask = 14, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_RGB444, ff: rgb444be, bin(0000,rrrr,gggg,bbbb)
    { .format = VEPU_FMT_RGB444, .r_mask = 11, .g_mask = 7, .b_mask = 3, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_BGR444, ff: bgr444be, bin(0000,bbbb,gggg,rrrr)
    { .format = VEPU_FMT_RGB444, .r_mask = 3, .g_mask = 7, .b_mask = 11, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_RGB888, ff: rgb24, bin(rrrr,rrrr,gggg,gggg,bbbb,bbbb)
    { .format = VEPU_FMT_BUTT, .r_mask = 0, .g_mask = 0, .b_mask = 0, .swap_8_in = 0, .swap_16_in = 0, .swap_32_in = 0, },
    //MPP_FMT_BGR888, ff: bgr24, bin(bbbb,bbbb,gggg,gggg,rrrr,rrrr)
    { .format = VEPU_FMT_BUTT, .r_mask = 0, .g_mask = 0, .b_mask = 0, .swap_8_in = 0, .swap_16_in = 0, .swap_32_in = 0, },
    //MPP_FMT_RGB101010, bin(00rr,rrrr,rrrr,gggg,gggg,ggbb,bbbb,bbbb)
    { .format = VEPU_FMT_RGB101010, .r_mask = 29, .g_mask = 19, .b_mask = 9, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_BGR101010, bin(00bb,bbbb,bbbb,gggg,gggg,ggrr,rrrr,rrrr)
    { .format = VEPU_FMT_RGB101010, .r_mask = 9, .g_mask = 19, .b_mask = 29, .swap_8_in = 1, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_ARGB8888, ff: argb, bin(aaaa,aaaa,rrrr,rrrr,gggg,gggg,bbbb,bbbb)
    { .format = VEPU_FMT_RGB888, .r_mask = 15, .g_mask = 23, .b_mask = 31, .swap_8_in = 0, .swap_16_in = 0, .swap_32_in = 1, },
    //MPP_FMT_ABGR8888, ffmpeg: rgba, bin(aaaa,aaaa,bbbb,bbbb,gggg,gggg,rrrr,rrrr)
    { .format = VEPU_FMT_RGB888, .r_mask = 31, .g_mask = 23, .b_mask = 15, .swap_8_in = 0, .swap_16_in = 0, .swap_32_in = 1, },
    //MPP_FMT_BGRA8888, ff: bgra, bin(bbbb,bbbb,gggg,gggg,rrrr,rrrr,aaaa,aaaa)
    { .format = VEPU_FMT_RGB888, .r_mask = 23, .g_mask = 15, .b_mask = 7, .swap_8_in = 0, .swap_16_in = 0, .swap_32_in = 1, },
    //MPP_FMT_RGBA8888, ff: rgba, bin(rrrr,rrrr,gggg,gggg,bbbb,bbbb,aaaa,aaaa)
    { .format = VEPU_FMT_RGB888, .r_mask = 7, .g_mask = 15, .b_mask = 23, .swap_8_in = 0, .swap_16_in = 0, .swap_32_in = 1, },
};
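/*
 * Configurations for the little-endian RGB variants, selected below when
 * MPP_FRAME_FMT_IS_LE(format) is true. Compared with vepu_rgb_cfg, the
 * byte-swap flags and, for some formats, the channel bit positions are
 * adjusted for the reversed byte order in memory.
 */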
static VepuFormatCfg vepu_rgb_le_cfg[MPP_FMT_RGB_BUTT - MPP_FRAME_FMT_RGB] = {
    //for little endian format
    //MPP_FMT_RGB565LE, ff: rgb565le, bin(gggb,bbbb,rrrr,rggg)
    { .format = VEPU_FMT_RGB565, .r_mask = 15, .g_mask = 10, .b_mask = 4, .swap_8_in = 0, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_BGR565LE, ff: bgr565le, bin(gggr,rrrr,bbbb,bggg)
    { .format = VEPU_FMT_RGB565, .r_mask = 4, .g_mask = 10, .b_mask = 15, .swap_8_in = 0, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_RGB555LE, ff: rgb555le, bin(gggb,bbbb,0rrr,rrgg)
    { .format = VEPU_FMT_RGB555, .r_mask = 14, .g_mask = 9, .b_mask = 4, .swap_8_in = 0, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_BGR555LE, ff: bgr555le, bin(gggr,rrrr,0bbb,bbgg)
    { .format = VEPU_FMT_RGB555, .r_mask = 4, .g_mask = 9, .b_mask = 14, .swap_8_in = 0, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_RGB444LE, ff: rgb444le, bin(gggg,bbbb,0000,rrrr)
    { .format = VEPU_FMT_RGB444, .r_mask = 11, .g_mask = 7, .b_mask = 3, .swap_8_in = 0, .swap_16_in = 1, .swap_32_in = 1, },
    //MPP_FMT_BGR444LE, ff: bgr444le, bin(gggg,rrrr,0000,bbbb)
    { .format = VEPU_FMT_RGB444, .r_mask = 3, .g_mask = 7, .b_mask = 11, .swap_8_in = 0, .swap_16_in = 1, .swap_32_in = 1, },

    //MPP_FMT_RGB888, ff: rgb24, bin(rrrr,rrrr,gggg,gggg,bbbb,bbbb)
    { .format = VEPU_FMT_BUTT, .r_mask = 0, .g_mask = 0, .b_mask = 0, .swap_8_in = 0, .swap_16_in = 0, .swap_32_in = 0, },
    //MPP_FMT_BGR888, ff: bgr24, bin(bbbb,bbbb,gggg,gggg,rrrr,rrrr)
    { .format = VEPU_FMT_BUTT, .r_mask = 0, .g_mask = 0, .b_mask = 0, .swap_8_in = 0, .swap_16_in = 0, .swap_32_in = 0, },

    //MPP_FMT_RGB101010, bin(00rr,rrrr,rrrr,gggg,gggg,ggbb,bbbb,bbbb)
    { .format = VEPU_FMT_RGB101010, .r_mask = 29, .g_mask = 19, .b_mask = 9, .swap_8_in = 0, .swap_16_in = 0, .swap_32_in = 1, },
    //MPP_FMT_BGR101010, bin(00bb,bbbb,bbbb,gggg,gggg,ggrr,rrrr,rrrr)
    { .format = VEPU_FMT_RGB101010, .r_mask = 9, .g_mask = 19, .b_mask = 29, .swap_8_in = 0, .swap_16_in = 0, .swap_32_in = 1, },

    //MPP_FMT_ARGB8888LE, ff: argb, bin(aaaa,aaaa,rrrr,rrrr,gggg,gggg,bbbb,bbbb)
    { .format = VEPU_FMT_RGB888, .r_mask = 23, .g_mask = 15, .b_mask = 7, .swap_8_in = 0, .swap_16_in = 0, .swap_32_in = 1, },
    //MPP_FMT_ABGR8888LE, ffmpeg: rgba, bin(aaaa,aaaa,bbbb,bbbb,gggg,gggg,rrrr,rrrr)
    { .format = VEPU_FMT_RGB888, .r_mask = 7, .g_mask = 15, .b_mask = 23, .swap_8_in = 0, .swap_16_in = 0, .swap_32_in = 1, },
    //MPP_FMT_BGRA8888LE, ff: bgra, bin(bbbb,bbbb,gggg,gggg,rrrr,rrrr,aaaa,aaaa)
    { .format = VEPU_FMT_RGB888, .r_mask = 15, .g_mask = 23, .b_mask = 31, .swap_8_in = 0, .swap_16_in = 0, .swap_32_in = 1, },
    //MPP_FMT_RGBA8888LE, ff: rgba, bin(rrrr,rrrr,gggg,gggg,bbbb,bbbb,aaaa,aaaa)
    { .format = VEPU_FMT_RGB888, .r_mask = 31, .g_mask = 23, .b_mask = 15, .swap_8_in = 0, .swap_16_in = 0, .swap_32_in = 1, },
};
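/*
 * Look up the VEPU input format configuration for an MppFrameFormat.
 * Copies the matching table entry into *cfg and returns MPP_OK, or marks
 * cfg->format as VEPU_FMT_BUTT and returns MPP_NOK when the format is
 * compressed (FBC) or otherwise not covered by the tables above.
 */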
MPP_RET get_vepu_fmt(VepuFormatCfg *cfg, MppFrameFormat format)
{
    VepuFormatCfg *fmt_cfg = NULL;
    MPP_RET ret = MPP_OK;

    if (MPP_FRAME_FMT_IS_FBC(format)) {
        mpp_err_f("unsupported frame format %x\n", format);
    } else if (MPP_FRAME_FMT_IS_YUV(format)) {
        if (!MPP_FRAME_FMT_IS_LE(format))
            fmt_cfg = &vepu_yuv_cfg[format - MPP_FRAME_FMT_YUV];
    } else if (MPP_FRAME_FMT_IS_RGB(format)) {
        if (MPP_FRAME_FMT_IS_LE(format))
            fmt_cfg = &vepu_rgb_le_cfg[(format & MPP_FRAME_FMT_MASK) - MPP_FRAME_FMT_RGB];
        else
            fmt_cfg = &vepu_rgb_cfg[format - MPP_FRAME_FMT_RGB];
    } else {
        memset(cfg, 0, sizeof(*cfg));
        cfg->format = VEPU_FMT_BUTT;
    }

    if (fmt_cfg && fmt_cfg->format != VEPU_FMT_BUTT) {
        memcpy(cfg, fmt_cfg, sizeof(*cfg));
    } else {
        mpp_err_f("unsupported frame format %x\n", format);
        cfg->format = VEPU_FMT_BUTT;
        ret = MPP_NOK;
    }

    return ret;
}
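/*
 * Detect a stride that was given in pixels instead of bytes: if the byte
 * stride is smaller than width * pixel_size, warn and return 1 so the caller
 * multiplies the stride up. 'workaround' carries the sticky state so the
 * warning is printed only once.
 */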
static RK_S32 check_stride_by_pixel(RK_S32 workaround, RK_S32 width,
                                    RK_S32 hor_stride, RK_S32 pixel_size)
{
    if (!workaround && hor_stride < width * pixel_size) {
        mpp_log("warning: stride in bytes %d is smaller than width %d multiplied by pixel size %d\n",
                hor_stride, width, pixel_size);
        mpp_log("multiply stride %d by pixel size %d and set new byte stride to %d\n",
                hor_stride, pixel_size, hor_stride * pixel_size);
        workaround = 1;
    }

    return workaround;
}
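/*
 * Check that the byte stride is a multiple of pixel_align pixels
 * (pixel_align * pixel_size bytes). Returns 1 and logs a warning on the
 * first violation; the caller then rounds the stride up.
 */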
static RK_S32 check_8_pixel_aligned(RK_S32 workaround, RK_S32 hor_stride,
                                    RK_S32 pixel_align, RK_S32 pixel_size,
                                    const char *fmt_name)
{
    if (!workaround && hor_stride != MPP_ALIGN_GEN(hor_stride, pixel_align * pixel_size)) {
        mpp_log("warning: vepu only supports 8-pixel aligned horizontal stride for %s with pixel size %d\n",
                fmt_name, pixel_size);
        mpp_log("set byte stride to %d to match the requirement\n",
                MPP_ALIGN_GEN(hor_stride, pixel_align * pixel_size));
        workaround = 1;
    }

    return workaround;
}
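/*
 * Convert the caller's horizontal stride into a stride expressed in pixels,
 * applying the stride-in-pixels and 8-pixel alignment workarounds above.
 * The result and the pixel size are cached in *cfg so the warnings are
 * emitted only once per width/stride/format combination.
 */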
RK_U32 get_vepu_pixel_stride(VepuStrideCfg *cfg, RK_U32 width, RK_U32 stride, MppFrameFormat fmt)
{
    RK_U32 hor_stride = stride;
    RK_U32 pixel_size = 1;

    if (cfg->fmt != fmt) {
        memset(cfg, 0, sizeof(VepuStrideCfg));
        cfg->fmt = fmt;
    }

    if (cfg->stride != stride || cfg->width != width) {
        cfg->not_8_pixel = 0;
        cfg->is_pixel_stride = 0;
        cfg->stride = stride;
        cfg->width = width;
    }
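    /*
     * Per-format handling: fix up a stride that was given in pixels, enforce
     * the alignment requirement, then divide the byte stride by the pixel
     * size so the returned value is in pixels.
     */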
    switch (fmt & MPP_FRAME_FMT_MASK) {
    case MPP_FMT_YUV420SP : {
        if (check_8_pixel_aligned(cfg->not_8_pixel, hor_stride, 8, 1, "YUV420SP")) {
            hor_stride = MPP_ALIGN(hor_stride, 8);
            cfg->not_8_pixel = 1;
        }
    } break;
    case MPP_FMT_YUV420P : {
        if (check_8_pixel_aligned(cfg->not_8_pixel, hor_stride, 16, 1, "YUV420P")) {
            hor_stride = MPP_ALIGN(hor_stride, 8);
            cfg->not_8_pixel = 1;
        }
    } break;
    case MPP_FMT_YUV422_YUYV :
    case MPP_FMT_YUV422_UYVY : {
        if (check_stride_by_pixel(cfg->is_pixel_stride, cfg->width,
                                  hor_stride, 2)) {
            hor_stride *= 2;
            cfg->is_pixel_stride = 1;
        }

        if (check_8_pixel_aligned(cfg->not_8_pixel, hor_stride, 8, 2, "YUV422_interleave")) {
            hor_stride = MPP_ALIGN(hor_stride, 16);
            cfg->not_8_pixel = 1;
        }

        hor_stride /= 2;
        pixel_size = 2;
    } break;
    case MPP_FMT_RGB565 :
    case MPP_FMT_BGR565 :
    case MPP_FMT_RGB555 :
    case MPP_FMT_BGR555 :
    case MPP_FMT_RGB444 :
    case MPP_FMT_BGR444 : {
        if (check_stride_by_pixel(cfg->is_pixel_stride, cfg->width,
                                  hor_stride, 2)) {
            hor_stride *= 2;
            cfg->is_pixel_stride = 1;
        }

        if (check_8_pixel_aligned(cfg->not_8_pixel, hor_stride, 8, 2, "16bit RGB")) {
            hor_stride = MPP_ALIGN(hor_stride, 16);
            cfg->not_8_pixel = 1;
        }

        hor_stride /= 2;
        pixel_size = 2;
    } break;
    case MPP_FMT_ARGB8888 :
    case MPP_FMT_ABGR8888 :
    case MPP_FMT_RGBA8888 :
    case MPP_FMT_BGRA8888 :
    case MPP_FMT_RGB101010 :
    case MPP_FMT_BGR101010 : {
        if (check_stride_by_pixel(cfg->is_pixel_stride, cfg->width,
                                  hor_stride, 4)) {
            hor_stride *= 4;
            cfg->is_pixel_stride = 1;
        }

        if (check_8_pixel_aligned(cfg->not_8_pixel, hor_stride, 8, 4, "32bit RGB")) {
            hor_stride = MPP_ALIGN(hor_stride, 32);
            cfg->not_8_pixel = 1;
        }

        hor_stride /= 4;
        pixel_size = 4;
    } break;
    default: {
        mpp_err_f("invalid fmt %d", fmt);
    }
    }

    cfg->pixel_stride = hor_stride;
    cfg->pixel_size = pixel_size;

    return hor_stride;
}
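/*
 * Fill the per-plane byte offsets for the input frame described in *cfg.
 * When an (offset_x, offset_y) origin is given, the offsets point at that
 * position inside each plane; otherwise only the chroma plane offsets for
 * the planar/semi-planar YUV420 layouts are filled.
 */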
MPP_RET get_vepu_offset_cfg(VepuOffsetCfg *cfg)
{
    MPP_RET ret = MPP_OK;
    MppFrameFormat fmt = cfg->fmt;
    RK_U32 hor_stride = cfg->hor_stride;
    RK_U32 ver_stride = cfg->ver_stride;
    RK_U32 offset_x = cfg->offset_x;
    RK_U32 offset_y = cfg->offset_y;
    RK_U32 offset_c = hor_stride * ver_stride;

    memset(cfg->offset_byte, 0, sizeof(cfg->offset_byte));
    memset(cfg->offset_pixel, 0, sizeof(cfg->offset_pixel));
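    /*
     * With a non-zero origin, each plane offset is the row offset plus the
     * horizontal offset scaled by that plane's bytes per pixel; offset_c is
     * the luma plane size and locates the start of the chroma data.
     */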
    if (offset_x || offset_y) {
        switch (fmt) {
        case MPP_FMT_YUV420SP : {
            cfg->offset_byte[0] = offset_y * hor_stride + offset_x;
            cfg->offset_byte[1] = offset_y / 2 * hor_stride + offset_x + offset_c;
        } break;
        case MPP_FMT_YUV420P : {
            cfg->offset_byte[0] = offset_y * hor_stride + offset_x;
            cfg->offset_byte[1] = (offset_y / 2) * (hor_stride / 2) + (offset_x / 2) + offset_c;
            cfg->offset_byte[2] = (offset_y / 2) * (hor_stride / 2) + (offset_x / 2) + offset_c * 5 / 4;
        } break;
        case MPP_FMT_YUV422_YUYV :
        case MPP_FMT_YUV422_UYVY : {
            mpp_assert((offset_x & 1) == 0);
            cfg->offset_byte[0] = offset_y * hor_stride + offset_x * 2;
        } break;
        case MPP_FMT_RGB565 :
        case MPP_FMT_BGR565 :
        case MPP_FMT_RGB555 :
        case MPP_FMT_BGR555 :
        case MPP_FMT_RGB444 :
        case MPP_FMT_BGR444 : {
            cfg->offset_byte[0] = offset_y * hor_stride + offset_x * 2;
        } break;
        case MPP_FMT_RGB101010 :
        case MPP_FMT_BGR101010 :
        case MPP_FMT_ARGB8888 :
        case MPP_FMT_ABGR8888 :
        case MPP_FMT_BGRA8888 :
        case MPP_FMT_RGBA8888 : {
            cfg->offset_byte[0] = offset_y * hor_stride + offset_x * 4;
        } break;
        default : {
        } break;
        }
    } else {
        switch (fmt) {
        case MPP_FMT_YUV420SP :
        case MPP_FMT_YUV420P : {
            RK_U32 offset = hor_stride * ver_stride;

            cfg->offset_byte[1] = offset;

            if (fmt == MPP_FMT_YUV420P)
                offset = hor_stride * ver_stride * 5 / 4;

            cfg->offset_byte[2] = offset;
        } break;
        default : {
        } break;
        }
    }

    return ret;
}