// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd. */

#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-dma-contig.h>
#include <linux/dma-iommu.h>
#include <linux/rk-camera-module.h>
#include "dev.h"
#include "regs.h"

static inline
struct rkisp_bridge_buf *to_bridge_buf(struct rkisp_ispp_buf *dbufs)
{
	return container_of(dbufs, struct rkisp_bridge_buf, dbufs);
}

/* compatible with the case where MI frame end is triggered before ISP frame end */
static void reg_buf_wait_for_stats(struct rkisp_bridge_device *dev,
				   struct rkisp_ispp_reg *reg_buf,
				   struct rkisp_isp2x_stat_buffer *tmp_statsbuf)
{
	s32 retry = 10;

	do {
		if (reg_buf->frame_id > tmp_statsbuf->frame_id)
			usleep_range(1000, 1200);
		else
			break;
	} while (retry-- > 0);

	if (retry < 0)
		v4l2_err(&dev->sd, "reg id(%d) doesn't match stats id(%d)\n",
			 reg_buf->frame_id, tmp_statsbuf->frame_id);
}

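/*
 * Snapshot the register ranges of the ISP2x modules enabled in the
 * rkisp_debug_reg bitmask into reg_buf, appending the matching 3A statistics
 * when the statistics buffer carries the same frame id.
 */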
static void dump_dbg_reg(struct rkisp_bridge_device *dev, struct rkisp_ispp_reg *reg_buf)
{
	struct rkisp_isp2x_stat_buffer *tmp_statsbuf;
	struct rkisp_hw_dev *hw = dev->ispdev->hw_dev;
	u32 offset = 0, size;

	tmp_statsbuf = (struct rkisp_isp2x_stat_buffer *)dev->ispdev->stats_vdev.tmp_statsbuf.vaddr;
	reg_buf_wait_for_stats(dev, reg_buf, tmp_statsbuf);
	memset(reg_buf->isp_offset, -1, sizeof(reg_buf->isp_offset));
	memset(reg_buf->ispp_offset, -1, sizeof(reg_buf->ispp_offset));
	memset(reg_buf->isp_size, 0, sizeof(reg_buf->isp_size));
	memset(reg_buf->isp_stats_size, 0, sizeof(reg_buf->isp_stats_size));
	memset(reg_buf->ispp_size, 0, sizeof(reg_buf->ispp_size));
	if (rkisp_debug_reg & ISP2X_MODULE_DPCC) {
		size = 4 + ISP_DPCC0_PDAF_FORWARD_MED - ISP_DPCC0_MODE;
		reg_buf->isp_size[ISP2X_ID_DPCC] = size;
		reg_buf->isp_offset[ISP2X_ID_DPCC] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_DPCC0_MODE, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_BLS) {
		size = 4 + ISP_BLS_D_MEASURED - ISP_BLS_CTRL;
		reg_buf->isp_size[ISP2X_ID_BLS] = size;
		reg_buf->isp_offset[ISP2X_ID_BLS] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_BLS_CTRL, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_SDG) {
		size = 4 + ISP_GAMMA_B_Y_16 - ISP_GAMMA_DX_LO;
		reg_buf->isp_size[ISP2X_ID_SDG] = size;
		reg_buf->isp_offset[ISP2X_ID_SDG] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_GAMMA_DX_LO, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_SIHST) {
		size = 4 + ISP_HIST_HIST3_DBG2 - ISP_HIST_HIST_CTRL;
		reg_buf->isp_size[ISP2X_ID_SIHST] = size;
		reg_buf->isp_offset[ISP2X_ID_SIHST] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_HIST_HIST_CTRL, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_LSC) {
		size = 4 + ISP_LSC_STATUS - ISP_LSC_CTRL;
		reg_buf->isp_size[ISP2X_ID_LSC] = size;
		reg_buf->isp_offset[ISP2X_ID_LSC] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_LSC_CTRL, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_AWB_GAIN) {
		size = 4 + CIF_ISP_AWB_GAIN_RB_V12 - CIF_ISP_AWB_GAIN_G_V12;
		reg_buf->isp_size[ISP2X_ID_AWB_GAIN] = size;
		reg_buf->isp_offset[ISP2X_ID_AWB_GAIN] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + CIF_ISP_AWB_GAIN_G_V12, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_CCM) {
		size = 4 + ISP_CCM_BOUND_BIT - ISP_CCM_CTRL;
		reg_buf->isp_size[ISP2X_ID_CCM] = size;
		reg_buf->isp_offset[ISP2X_ID_CCM] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_CCM_CTRL, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_GOC) {
		size = 4 + ISP_GAMMA_OUT_Y40 - ISP_GAMMA_OUT_CTRL;
		reg_buf->isp_size[ISP2X_ID_GOC] = size;
		reg_buf->isp_offset[ISP2X_ID_GOC] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_GAMMA_OUT_CTRL, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_CPROC) {
		size = 4 + CPROC_HUE - CPROC_CTRL;
		reg_buf->isp_size[ISP2X_ID_CPROC] = size;
		reg_buf->isp_offset[ISP2X_ID_CPROC] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + CPROC_CTRL, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_SIAF) {
		size = 4 + ISP_AFM_LUM_C - ISP_AFM_CTRL;
		reg_buf->isp_size[ISP2X_ID_SIAF] = size;
		reg_buf->isp_offset[ISP2X_ID_SIAF] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_AFM_CTRL, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_SIAWB) {
		size = 4 + CIF_ISP_AWB_MEAN_V10 - CIF_ISP_AWB_PROP_V10;
		reg_buf->isp_size[ISP2X_ID_SIAWB] = size;
		reg_buf->isp_offset[ISP2X_ID_SIAWB] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + CIF_ISP_AWB_PROP_V10, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_IE) {
		size = 4 + CIF_IMG_EFF_SHARPEN - CIF_IMG_EFF_CTRL;
		reg_buf->isp_size[ISP2X_ID_IE] = size;
		reg_buf->isp_offset[ISP2X_ID_IE] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + CIF_IMG_EFF_CTRL, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_YUVAE) {
		size = 4 + ISP_YUVAE_RO_DBG3 - ISP_YUVAE_CTRL;
		reg_buf->isp_size[ISP2X_ID_YUVAE] = size;
		reg_buf->isp_offset[ISP2X_ID_YUVAE] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_YUVAE_CTRL, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_WDR) {
		size = 4 + ISP_WDR_BLKMEAN8_ROW9_4TO7 - ISP_WDR_CTRL;
		reg_buf->isp_size[ISP2X_ID_WDR] = size;
		reg_buf->isp_offset[ISP2X_ID_WDR] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_WDR_CTRL, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_RK_IESHARP) {
		size = 4 + CIF_RKSHARP_UV_GAUSS_OTHER_COE33_COE35 - CIF_RKSHARP_CTRL;
		reg_buf->isp_size[ISP2X_ID_RK_IESHARP] = size;
		reg_buf->isp_offset[ISP2X_ID_RK_IESHARP] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + CIF_RKSHARP_CTRL, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_RAWAF) {
		size = 4 + ISP_RAWAF_INT_STATE - ISP_RAWAF_CTRL;
		reg_buf->isp_size[ISP2X_ID_RAWAF] = size;
		reg_buf->isp_offset[ISP2X_ID_RAWAF] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_RAWAF_CTRL, size);
		offset += size;

		size = ISP2X_RAWAF_SUMDATA_NUM * sizeof(tmp_statsbuf->params.rawaf.ramdata[0]);
		reg_buf->isp_size[ISP2X_ID_RAWAF] += size;
		reg_buf->isp_stats_size[ISP2X_ID_RAWAF] = size;
		if (tmp_statsbuf->frame_id == reg_buf->frame_id)
			memcpy(&reg_buf->reg[offset], &tmp_statsbuf->params.rawaf.ramdata[0], size);
		else
			memset(&reg_buf->reg[offset], 0, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_RAWAE0) {
		size = 4 + ISP_RAWAE_LITE_RO_DBG2 - ISP_RAWAE_LITE_CTRL;
		reg_buf->isp_size[ISP2X_ID_RAWAE0] = size;
		reg_buf->isp_offset[ISP2X_ID_RAWAE0] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_RAWAE_LITE_CTRL, size);
		offset += size;

		size = ISP2X_RAWAELITE_MEAN_NUM * sizeof(tmp_statsbuf->params.rawae0.data[0]);
		reg_buf->isp_size[ISP2X_ID_RAWAE0] += size;
		reg_buf->isp_stats_size[ISP2X_ID_RAWAE0] = size;
		if (tmp_statsbuf->frame_id == reg_buf->frame_id)
			memcpy(&reg_buf->reg[offset], &tmp_statsbuf->params.rawae0.data[0], size);
		else
			memset(&reg_buf->reg[offset], 0, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_RAWAE1) {
		size = 4 + RAWAE_BIG_RO_DBG3 - RAWAE_BIG_CTRL;
		reg_buf->isp_size[ISP2X_ID_RAWAE1] = size;
		reg_buf->isp_offset[ISP2X_ID_RAWAE1] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + RAWAE_BIG1_BASE, size);
		offset += size;

		size = ISP2X_RAWAEBIG_MEAN_NUM * sizeof(tmp_statsbuf->params.rawae1.data[0]);
		reg_buf->isp_size[ISP2X_ID_RAWAE1] += size;
		reg_buf->isp_stats_size[ISP2X_ID_RAWAE1] = size;
		if (tmp_statsbuf->frame_id == reg_buf->frame_id)
			memcpy(&reg_buf->reg[offset], &tmp_statsbuf->params.rawae1.data[0], size);
		else
			memset(&reg_buf->reg[offset], 0, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_RAWAE2) {
		size = 4 + RAWAE_BIG_RO_DBG3 - RAWAE_BIG_CTRL;
		reg_buf->isp_size[ISP2X_ID_RAWAE2] = size;
		reg_buf->isp_offset[ISP2X_ID_RAWAE2] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + RAWAE_BIG2_BASE, size);
		offset += size;

		size = ISP2X_RAWAEBIG_MEAN_NUM * sizeof(tmp_statsbuf->params.rawae2.data[0]);
		reg_buf->isp_size[ISP2X_ID_RAWAE2] += size;
		reg_buf->isp_stats_size[ISP2X_ID_RAWAE2] = size;
		if (tmp_statsbuf->frame_id == reg_buf->frame_id)
			memcpy(&reg_buf->reg[offset], &tmp_statsbuf->params.rawae2.data[0], size);
		else
			memset(&reg_buf->reg[offset], 0, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_RAWAE3) {
		size = 4 + RAWAE_BIG_RO_DBG3 - RAWAE_BIG_CTRL;
		reg_buf->isp_size[ISP2X_ID_RAWAE3] = size;
		reg_buf->isp_offset[ISP2X_ID_RAWAE3] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + RAWAE_BIG3_BASE, size);
		offset += size;

		size = ISP2X_RAWAEBIG_MEAN_NUM * sizeof(tmp_statsbuf->params.rawae3.data[0]);
		reg_buf->isp_size[ISP2X_ID_RAWAE3] += size;
		reg_buf->isp_stats_size[ISP2X_ID_RAWAE3] = size;
		if (tmp_statsbuf->frame_id == reg_buf->frame_id)
			memcpy(&reg_buf->reg[offset], &tmp_statsbuf->params.rawae3.data[0], size);
		else
			memset(&reg_buf->reg[offset], 0, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_RAWAWB) {
		size = 4 + ISP_RAWAWB_RAM_CTRL - ISP_RAWAWB_CTRL;
		reg_buf->isp_size[ISP2X_ID_RAWAWB] = size;
		reg_buf->isp_offset[ISP2X_ID_RAWAWB] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_RAWAWB_CTRL, size);
		offset += size;

		size = ISP2X_RAWAWB_RAMDATA_NUM * sizeof(tmp_statsbuf->params.rawawb.ramdata[0]);
		reg_buf->isp_size[ISP2X_ID_RAWAWB] += size;
		reg_buf->isp_stats_size[ISP2X_ID_RAWAWB] = size;
		if (tmp_statsbuf->frame_id == reg_buf->frame_id)
			memcpy(&reg_buf->reg[offset], &tmp_statsbuf->params.rawawb.ramdata[0], size);
		else
			memset(&reg_buf->reg[offset], 0, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_RAWHIST0) {
		size = 4 + ISP_RAWHIST_LITE_WEIGHT - ISP_RAWHIST_LITE_CTRL;
		reg_buf->isp_size[ISP2X_ID_RAWHIST0] = size;
		reg_buf->isp_offset[ISP2X_ID_RAWHIST0] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_RAWHIST_LITE_CTRL, size);
		offset += size;

		size = ISP2X_HIST_BIN_N_MAX * sizeof(tmp_statsbuf->params.rawhist0.hist_bin[0]);
		reg_buf->isp_size[ISP2X_ID_RAWHIST0] += size;
		reg_buf->isp_stats_size[ISP2X_ID_RAWHIST0] = size;
		if (tmp_statsbuf->frame_id == reg_buf->frame_id)
			memcpy(&reg_buf->reg[offset], &tmp_statsbuf->params.rawhist0.hist_bin[0], size);
		else
			memset(&reg_buf->reg[offset], 0, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_RAWHIST1) {
		size = 4 + ISP_RAWHIST_BIG_WEIGHT_BASE - ISP_RAWHIST_BIG_CTRL;
		reg_buf->isp_size[ISP2X_ID_RAWHIST1] = size;
		reg_buf->isp_offset[ISP2X_ID_RAWHIST1] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_RAWHIST_BIG1_BASE, size);
		offset += size;

		size = ISP2X_HIST_BIN_N_MAX * sizeof(tmp_statsbuf->params.rawhist1.hist_bin[0]);
		reg_buf->isp_size[ISP2X_ID_RAWHIST1] += size;
		reg_buf->isp_stats_size[ISP2X_ID_RAWHIST1] = size;
		if (tmp_statsbuf->frame_id == reg_buf->frame_id)
			memcpy(&reg_buf->reg[offset], &tmp_statsbuf->params.rawhist1.hist_bin[0], size);
		else
			memset(&reg_buf->reg[offset], 0, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_RAWHIST2) {
		size = 4 + ISP_RAWHIST_BIG_WEIGHT_BASE - ISP_RAWHIST_BIG_CTRL;
		reg_buf->isp_size[ISP2X_ID_RAWHIST2] = size;
		reg_buf->isp_offset[ISP2X_ID_RAWHIST2] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_RAWHIST_BIG2_BASE, size);
		offset += size;

		size = ISP2X_HIST_BIN_N_MAX * sizeof(tmp_statsbuf->params.rawhist2.hist_bin[0]);
		reg_buf->isp_size[ISP2X_ID_RAWHIST2] += size;
		reg_buf->isp_stats_size[ISP2X_ID_RAWHIST2] = size;
		if (tmp_statsbuf->frame_id == reg_buf->frame_id)
			memcpy(&reg_buf->reg[offset], &tmp_statsbuf->params.rawhist2.hist_bin[0], size);
		else
			memset(&reg_buf->reg[offset], 0, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_RAWHIST3) {
		size = 4 + ISP_RAWHIST_BIG_WEIGHT_BASE - ISP_RAWHIST_BIG_CTRL;
		reg_buf->isp_size[ISP2X_ID_RAWHIST3] = size;
		reg_buf->isp_offset[ISP2X_ID_RAWHIST3] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_RAWHIST_BIG3_BASE, size);
		offset += size;

		size = ISP2X_HIST_BIN_N_MAX * sizeof(tmp_statsbuf->params.rawhist3.hist_bin[0]);
		reg_buf->isp_size[ISP2X_ID_RAWHIST3] += size;
		reg_buf->isp_stats_size[ISP2X_ID_RAWHIST3] = size;
		if (tmp_statsbuf->frame_id == reg_buf->frame_id)
			memcpy(&reg_buf->reg[offset], &tmp_statsbuf->params.rawhist3.hist_bin[0], size);
		else
			memset(&reg_buf->reg[offset], 0, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_HDRMGE) {
		size = 4 + ISP_HDRMGE_OVER_Y16 - ISP_HDRMGE_CTRL;
		reg_buf->isp_size[ISP2X_ID_HDRMGE] = size;
		reg_buf->isp_offset[ISP2X_ID_HDRMGE] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_HDRMGE_CTRL, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_RAWNR) {
		size = 4 + ISP_RAWNR_RGBAIN_FLIP - ISP_RAWNR_CTRL;
		reg_buf->isp_size[ISP2X_ID_RAWNR] = size;
		reg_buf->isp_offset[ISP2X_ID_RAWNR] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_RAWNR_CTRL, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_HDRTMO) {
		size = 4 + ISP_HDRTMO_HIST_RO31 - ISP_HDRTMO_CTRL;
		reg_buf->isp_size[ISP2X_ID_HDRTMO] = size;
		reg_buf->isp_offset[ISP2X_ID_HDRTMO] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_HDRTMO_CTRL, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_GIC) {
		size = 4 + ISP_GIC_NOISE_CTRL1 - ISP_GIC_CONTROL;
		reg_buf->isp_size[ISP2X_ID_GIC] = size;
		reg_buf->isp_offset[ISP2X_ID_GIC] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_GIC_CONTROL, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_DHAZ) {
		size = 4 + ISP_DHAZ_HIST_REG95 - ISP_DHAZ_CTRL;
		reg_buf->isp_size[ISP2X_ID_DHAZ] = size;
		reg_buf->isp_offset[ISP2X_ID_DHAZ] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_DHAZ_CTRL, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_3DLUT) {
		size = 4 + ISP_3DLUT_UPDATE - ISP_3DLUT_CTRL;
		reg_buf->isp_size[ISP2X_ID_3DLUT] = size;
		reg_buf->isp_offset[ISP2X_ID_3DLUT] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_3DLUT_CTRL, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_LDCH) {
		size = 4 + ISP_LDCH_STS - ISP_LDCH_STS;
		reg_buf->isp_size[ISP2X_ID_LDCH] = size;
		reg_buf->isp_offset[ISP2X_ID_LDCH] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_LDCH_STS, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_GAIN) {
		size = 4 + ISP_GAIN_LUT8 - ISP_GAIN_CTRL;
		reg_buf->isp_size[ISP2X_ID_GAIN] = size;
		reg_buf->isp_offset[ISP2X_ID_GAIN] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_GAIN_CTRL, size);
		offset += size;
	}

	if (rkisp_debug_reg & ISP2X_MODULE_DEBAYER) {
		size = 4 + ISP_DEBAYER_C_FILTER - ISP_DEBAYER_CONTROL;
		reg_buf->isp_size[ISP2X_ID_DEBAYER] = size;
		reg_buf->isp_offset[ISP2X_ID_DEBAYER] = offset;
		memcpy_fromio(&reg_buf->reg[offset], hw->base_addr + ISP_DEBAYER_CONTROL, size);
		offset += size;
	}

	reg_buf->reg_size = offset;
}

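/*
 * Pair the pending MFBC/gain buffer with the pending SP buffer: when their
 * frame ids and device ids match, fill the gain plane info into the SP
 * buffer, return the SP buffer to userspace and queue the bridge buffer on
 * the report list. Called with hw->buf_lock held.
 */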
static void rkisp_bridge_try_sendtohal(struct rkisp_device *dev)
{
	struct rkisp_hw_dev *hw = dev->hw_dev;
	struct rkisp_bridge_device *br_dev = &dev->br_dev;
	struct rkisp_ispp_buf *cur_fbcgain = dev->cur_fbcgain;
	struct rkisp_buffer *cur_spbuf = dev->cur_spbuf;
	struct isp2x_ispgain_buf *buf;
	struct rkisp_bridge_buf *bdgbuf;
	struct vb2_buffer *vb2_buf;
	u32 *vaddr, size;
	u64 pic_ts, gain_ts, sp_ts;

	if (cur_fbcgain && cur_spbuf) {
		if ((cur_fbcgain->frame_id == cur_spbuf->vb.sequence) &&
		    (cur_fbcgain->index == cur_spbuf->dev_id)) {
			vb2_buf = &cur_spbuf->vb.vb2_buf;
			cur_fbcgain->buf_idx = vb2_buf->index;
			buf = vb2_plane_vaddr(vb2_buf, 1);
			buf->gain_dmaidx = cur_fbcgain->gain_dmaidx;
			buf->mfbc_dmaidx = cur_fbcgain->mfbc_dmaidx;
			buf->gain_size = cur_fbcgain->gain_size;
			buf->mfbc_size = cur_fbcgain->mfbc_size;
			buf->frame_id = cur_fbcgain->frame_id;
			bdgbuf = to_bridge_buf(cur_fbcgain);
			rkisp_finish_buffer(dev, &bdgbuf->dummy[GROUP_BUF_GAIN]);
			vb2_buffer_done(&cur_spbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
			list_add_tail(&cur_fbcgain->list, &hw->rpt_list);
			dev->cur_fbcgain = NULL;
			dev->cur_spbuf = NULL;
			v4l2_dbg(3, rkisp_debug, &br_dev->sd,
				 "%s send mfbcgain buf to hal, frame_id %d\n",
				 __func__, cur_fbcgain->frame_id);

			bdgbuf = to_bridge_buf(cur_fbcgain);
			vaddr = bdgbuf->dummy[GROUP_BUF_PIC].vaddr;
			size = bdgbuf->dummy[GROUP_BUF_PIC].size;
			pic_ts = *(u64 *)(vaddr + size / 4 - 2);

			vaddr = bdgbuf->dummy[GROUP_BUF_GAIN].vaddr;
			size = bdgbuf->dummy[GROUP_BUF_GAIN].size;
			gain_ts = *(u64 *)(vaddr + size / 4 - 2);

			size = vb2_plane_size(&cur_spbuf->vb.vb2_buf, 0);
			vaddr = (u32 *)vb2_plane_vaddr(&cur_spbuf->vb.vb2_buf, 0);
			sp_ts = *(u64 *)(vaddr + size / 4 - 2);
			if (abs(pic_ts - gain_ts) > 5000000LL || abs(pic_ts - sp_ts) > 5000000LL ||
			    abs(gain_ts - sp_ts) > 5000000LL) {
				v4l2_info(&br_dev->sd,
					  "%s: frame %d, timestamps do not match (pic_ts %lld, gain_ts %lld, sp_ts %lld)\n",
					  __func__, cur_fbcgain->frame_id, pic_ts, gain_ts, sp_ts);
			}
		} else {
			v4l2_info(&br_dev->sd,
				  "%s frame_id(%d, %d) or dev_id(%d, %d) does not match\n",
				  __func__,
				  cur_fbcgain->frame_id, cur_spbuf->vb.sequence,
				  cur_fbcgain->index, cur_spbuf->dev_id);
		}
	}
}

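/*
 * Latch the newly completed MFBC/gain buffer; if an unconsumed one is still
 * pending it is requeued to the free list and counted as frame loss, then
 * try to pair the new buffer with a saved SP buffer.
 */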
static void rkisp_bridge_save_fbcgain(struct rkisp_device *dev, struct rkisp_ispp_buf *fbcgain)
{
	struct rkisp_hw_dev *hw = dev->hw_dev;
	struct rkisp_bridge_device *br_dev = &dev->br_dev;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(&hw->buf_lock, lock_flags);
	if (dev->cur_fbcgain) {
		v4l2_dbg(1, rkisp_debug, &br_dev->sd,
			 "%s old mfbcgain buf still exists, frame_id %d\n",
			 __func__, dev->cur_fbcgain->frame_id);
		list_add_tail(&dev->cur_fbcgain->list, &hw->list);
		dev->cur_fbcgain = NULL;
		br_dev->dbg.frameloss++;
	}
	dev->cur_fbcgain = fbcgain;
	rkisp_bridge_try_sendtohal(dev);
	spin_unlock_irqrestore(&hw->buf_lock, lock_flags);
}

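/* Workqueue handler: dump the debug registers for one frame, then free the work item. */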
static void rkisp_bridge_work(struct work_struct *work)
{
	struct rkisp_bridge_work *br_wk =
		container_of(work, struct rkisp_bridge_work, work);
	struct rkisp_bridge_device *dev = br_wk->dev;

	struct rkisp_ispp_reg *reg_buf = (struct rkisp_ispp_reg *)br_wk->param;

	dump_dbg_reg(dev, reg_buf);

	kfree(br_wk);
}

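/*
 * Program the MI gain write path for the current crop: buffer size is the
 * 64/128-aligned picture area divided by 16, line length is the width
 * divided by 4 and aligned to 16, with self-update enabled.
 */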
static int config_gain(struct rkisp_bridge_device *dev)
{
	u32 w = dev->crop.width;
	u32 h = dev->crop.height;
	u32 val;

	val = ALIGN(w, 64) * ALIGN(h, 128) >> 4;
	rkisp_write(dev->ispdev, MI_GAIN_WR_SIZE, val, false);
	val = ALIGN((w + 3) >> 2, 16);
	rkisp_write(dev->ispdev, MI_GAIN_WR_LENGTH, val, false);
	rkisp_set_bits(dev->ispdev, MI_WR_CTRL2,
		       0, SW_GAIN_WR_AUTOUPD, true);
	return 0;
}

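/*
 * Configure the dual-crop unit for the main path; the RKMODULE_EXTEND_LINE
 * extra lines are accounted for in the ISP_V20 read-back Bayer case, and
 * cropping is bypassed when input and output sizes already match.
 */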
static void crop_on(struct rkisp_bridge_device *dev)
{
	struct rkisp_device *ispdev = dev->ispdev;
	u32 src_w = ispdev->isp_sdev.out_crop.width;
	u32 src_h = ispdev->isp_sdev.out_crop.height;
	u32 dest_w = dev->crop.width;
	u32 dest_h = dev->crop.height;
	u32 left = dev->crop.left;
	u32 top = dev->crop.top;
	u32 ctrl = CIF_DUAL_CROP_CFG_UPD;

	if (ispdev->isp_ver == ISP_V20 &&
	    ispdev->rd_mode == HDR_RDBK_FRAME1 &&
	    ispdev->isp_sdev.in_fmt.fmt_type == FMT_BAYER)
		src_h += RKMODULE_EXTEND_LINE;

	rkisp_write(ispdev, CIF_DUAL_CROP_M_H_OFFS, left, false);
	rkisp_write(ispdev, CIF_DUAL_CROP_M_V_OFFS, top, false);
	rkisp_write(ispdev, CIF_DUAL_CROP_M_H_SIZE, dest_w, false);
	rkisp_write(ispdev, CIF_DUAL_CROP_M_V_SIZE, dest_h, false);
	ctrl |= rkisp_read(ispdev, CIF_DUAL_CROP_CTRL, true);
	if (src_w == dest_w && src_h == dest_h)
		ctrl &= ~(CIF_DUAL_CROP_MP_MODE_YUV | CIF_DUAL_CROP_MP_MODE_RAW);
	else
		ctrl |= CIF_DUAL_CROP_MP_MODE_YUV;
	rkisp_write(ispdev, CIF_DUAL_CROP_CTRL, ctrl, false);
}

static void crop_off(struct rkisp_bridge_device *dev)
{
	struct rkisp_device *ispdev = dev->ispdev;
	u32 ctrl = CIF_DUAL_CROP_GEN_CFG_UPD;

	ctrl |= rkisp_read(ispdev, CIF_DUAL_CROP_CTRL, true);
	ctrl &= ~(CIF_DUAL_CROP_MP_MODE_YUV | CIF_DUAL_CROP_MP_MODE_RAW);
	rkisp_write(ispdev, CIF_DUAL_CROP_CTRL, ctrl, false);
}

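/*
 * Bring up the ISP->ISPP bridge path: configure HDR DMA-TX, crop, gain and
 * the selected output unit (MPFBC or MP), start the SP stream and set up
 * the early frame-done wait line when enabled.
 */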
static int bridge_start(struct rkisp_bridge_device *dev)
{
	struct rkisp_device *ispdev = dev->ispdev;
	struct rkisp_stream *sp_stream;

	hdr_config_dmatx(ispdev);

	sp_stream = &ispdev->cap_dev.stream[RKISP_STREAM_SP];
	crop_on(dev);
	config_gain(dev);
	dev->ops->config(dev);
	rkisp_start_spstream(sp_stream);

	if (!dev->ispdev->hw_dev->is_mi_update) {
		rkisp_config_dmatx_valid_buf(dev->ispdev);
		force_cfg_update(dev->ispdev);
		rkisp_update_spstream_buf(sp_stream);
		hdr_update_dmatx_buf(dev->ispdev);
	}
	rkisp_stats_first_ddr_config(&dev->ispdev->stats_vdev);
	ispdev->skip_frame = 0;
	dev->en = true;

	ispdev->cap_dev.is_done_early = false;
	if (ispdev->send_fbcgain)
		ispdev->cap_dev.wait_line = 0;
	if (ispdev->cap_dev.wait_line) {
		if (ispdev->cap_dev.wait_line < dev->crop.height / 4)
			ispdev->cap_dev.wait_line = dev->crop.height / 4;
		ispdev->cap_dev.is_done_early = true;
	}
	return 0;
}

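/*
 * Stop the bridge path: disable the output unit and the SP stream, wait up
 * to one second for the last frame end, then clear the crop and drain the
 * debug-dump workqueue.
 */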
static int bridge_stop(struct rkisp_bridge_device *dev)
{
	struct rkisp_stream *sp_stream;
	int ret;
	u32 irq;

	sp_stream = &dev->ispdev->cap_dev.stream[RKISP_STREAM_SP];
	dev->stopping = true;
	dev->ops->disable(dev);
	rkisp_stop_spstream(sp_stream);
	hdr_stop_dmatx(dev->ispdev);
	if (!dev->ispdev->hw_dev->is_shutdown) {
		ret = wait_event_timeout(dev->done, !dev->en,
					 msecs_to_jiffies(1000));
		if (!ret)
			v4l2_warn(&dev->sd,
				  "%s timeout ret:%d\n", __func__, ret);
	}
	crop_off(dev);
	dev->stopping = false;
	dev->en = false;
	irq = dev->cfg->frame_end_id;
	irq = (irq == MI_MPFBC_FRAME) ? ISP_FRAME_MPFBC : ISP_FRAME_MP;
	dev->ispdev->irq_ends_mask &= ~irq;
	drain_workqueue(dev->wq);

	/* make sure ispp last frame done */
	if (dev->work_mode & ISP_ISPP_QUICK) {
		rkisp_clear_bits(dev->ispdev, MI_IMSC,
				 dev->cfg->frame_end_id, true);
		usleep_range(20000, 25000);
	}
	return 0;
}

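/*
 * Program the next picture/gain buffer addresses into the MI/MPFBC write
 * registers and, in early frame-done mode, arm the polling hrtimer.
 */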
static void bridge_update_mi(struct rkisp_bridge_device *br)
{
	struct rkisp_device *dev = br->ispdev;
	struct rkisp_hw_dev *hw = dev->hw_dev;
	struct rkisp_bridge_buf *buf;
	u32 val;

	if (hw->nxt_buf) {
		buf = to_bridge_buf(hw->nxt_buf);
		val = buf->dummy[GROUP_BUF_PIC].dma_addr;
		rkisp_write(dev, br->cfg->reg.y0_base, val, true);
		val += br->cfg->offset;
		rkisp_write(dev, br->cfg->reg.uv0_base, val, true);
		val = buf->dummy[GROUP_BUF_GAIN].dma_addr;
		rkisp_write(dev, br->cfg->reg.g0_base, val, true);
	}

	if (dev->cap_dev.is_done_early && !br->frame_early) {
		br->frame_early = true;
		hrtimer_start(&br->frame_qst, ns_to_ktime(1000000), HRTIMER_MODE_REL);
	}

	v4l2_dbg(2, rkisp_debug, &br->sd,
		 "update pic(shd:0x%x base:0x%x) gain(shd:0x%x base:0x%x)\n",
		 rkisp_read(dev, br->cfg->reg.y0_base_shd, true),
		 rkisp_read(dev, br->cfg->reg.y0_base, true),
		 rkisp_read(dev, br->cfg->reg.g0_base_shd, true),
		 rkisp_read(dev, br->cfg->reg.g0_base, true));
}

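/*
 * Frame end handling: complete a pending stop, account frame timing, hand
 * the finished buffer to the ISPP subdev (or to the fbcgain path), schedule
 * a debug register dump when a register buffer is provided, and rotate the
 * cur/nxt buffers.
 */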
static int bridge_frame_end(struct rkisp_bridge_device *dev, u32 state)
{
	struct rkisp_device *ispdev = dev->ispdev;
	struct rkisp_hw_dev *hw = ispdev->hw_dev;
	struct v4l2_subdev *sd = v4l2_get_subdev_hostdata(&dev->sd);
	unsigned long lock_flags = 0;
	u64 ns = ktime_get_ns();
	struct rkisp_bridge_buf *buf;
	u32 val;

	if (dev->stopping) {
		if (dev->ops->is_stopped(dev)) {
			dev->en = false;
			dev->stopping = false;
			wake_up(&dev->done);
		}
	}

	if (!dev->en) {
		val = dev->cfg->frame_end_id;
		val = (val == MI_MPFBC_FRAME) ? ISP_FRAME_MPFBC : ISP_FRAME_MP;
		ispdev->irq_ends_mask &= ~val;
		return 0;
	}

	if (dev->work_mode & ISP_ISPP_QUICK ||
	    (state == FRAME_IRQ && ispdev->cap_dev.is_done_early))
		return 0;
	dev->frame_early = false;
	rkisp_dmarx_get_frame(dev->ispdev, &dev->dbg.id, NULL, NULL, true);
	dev->dbg.interval = ns - dev->dbg.timestamp;
	dev->dbg.timestamp = ns;
	if (hw->cur_buf && hw->nxt_buf) {
		if (ispdev->skip_frame > 0) {
			ispdev->skip_frame--;
			spin_lock_irqsave(&hw->buf_lock, lock_flags);
			list_add_tail(&hw->cur_buf->list, &hw->list);
			spin_unlock_irqrestore(&hw->buf_lock, lock_flags);
		} else {
			u64 sof_ns = 0;
			struct rkisp_ispp_reg *reg_buf = NULL;

			ns = 0;
			rkisp_dmarx_get_frame(ispdev, &hw->cur_buf->frame_id,
					      &sof_ns, &ns, true);
			if (!sof_ns)
				sof_ns = 0;
			if (!ns)
				ns = ktime_get_ns();
			hw->cur_buf->frame_timestamp = ns;
			hw->cur_buf->index = ispdev->dev_id;
			v4l2_subdev_call(sd, core, ioctl, RKISP_ISPP_CMD_REQUEST_REGBUF,
					 &reg_buf);
			if (reg_buf) {
				struct rkisp_bridge_work *br_wk;

				br_wk = kzalloc(sizeof(struct rkisp_bridge_work), GFP_ATOMIC);
				if (br_wk) {
					reg_buf->stat = ISP_ISPP_INUSE;
					reg_buf->dev_id = hw->cur_buf->index;
					reg_buf->frame_id = hw->cur_buf->frame_id;
					reg_buf->sof_timestamp = sof_ns;
					reg_buf->frame_timestamp = hw->cur_buf->frame_timestamp;
					reg_buf->exposure = ispdev->params_vdev.exposure;

					br_wk->dev = dev;
					br_wk->param = (void *)reg_buf;
					INIT_WORK((struct work_struct *)&br_wk->work, rkisp_bridge_work);
					if (!queue_work(dev->wq, (struct work_struct *)&br_wk->work)) {
						v4l2_err(&dev->sd, "queue work failed\n");
						kfree(br_wk);
					}
				}
			}

			if (ispdev->send_fbcgain) {
				u32 *vaddr, size;

				buf = to_bridge_buf(hw->cur_buf);
				vaddr = buf->dummy[GROUP_BUF_PIC].vaddr;
				size = buf->dummy[GROUP_BUF_PIC].size;
				*(u64 *)(vaddr + size / 4 - 2) = ktime_get_ns();

				vaddr = buf->dummy[GROUP_BUF_GAIN].vaddr;
				size = buf->dummy[GROUP_BUF_GAIN].size;
				*(u64 *)(vaddr + size / 4 - 2) = ktime_get_ns();
				hw->cur_buf->mfbc_dmaidx = hw->cur_buf->didx[GROUP_BUF_PIC];
				hw->cur_buf->gain_dmaidx = hw->cur_buf->didx[GROUP_BUF_GAIN];
				hw->cur_buf->is_move_judge = true;
				rkisp_bridge_save_fbcgain(ispdev, hw->cur_buf);
			} else {
				hw->cur_buf->is_move_judge = false;
				v4l2_subdev_call(sd, video, s_rx_buffer, hw->cur_buf, NULL);
			}
		}
		hw->cur_buf = NULL;
	} else {
		v4l2_dbg(1, rkisp_debug, &dev->sd, "no buf, lost frame:%d\n", dev->dbg.id);
		dev->dbg.frameloss++;
	}

	if (hw->nxt_buf) {
		hw->cur_buf = hw->nxt_buf;
		hw->nxt_buf = NULL;
	}

	return 0;
}

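/*
 * hrtimer callback for early frame done: poll the MPFBC encode line counter
 * and either rearm the timer until the configured wait line is reached or
 * run the frame end handling ahead of the hardware interrupt.
 */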
static enum hrtimer_restart rkisp_bridge_frame_done_early(struct hrtimer *timer)
{
	struct rkisp_bridge_device *br =
		container_of(timer, struct rkisp_bridge_device, frame_qst);
	struct rkisp_device *dev = br->ispdev;
	enum hrtimer_restart ret = HRTIMER_NORESTART;
	u32 ycnt, line = dev->cap_dev.wait_line;
	u32 seq, time, max_time = 1000000;
	u64 ns = ktime_get_ns();

	time = (u32)(ns - br->fs_ns);
	ycnt = rkisp_read(dev, ISP_MPFBC_ENC_POS, true) & 0x3ff;
	ycnt *= 8;
	rkisp_dmarx_get_frame(dev, &seq, NULL, NULL, true);
	if (!br->en || dev->isp_state == ISP_STOP) {
		goto end;
	} else if (ycnt < line) {
		if (!ycnt)
			ns = max_time;
		else
			ns = time * (line - ycnt) / ycnt;
		if (ns > max_time)
			ns = max_time;
		hrtimer_forward(timer, timer->base->get_time(), ns_to_ktime(ns));
		ret = HRTIMER_RESTART;
	} else {
		v4l2_dbg(3, rkisp_debug, &dev->v4l2_dev,
			 "%s seq:%d line:%d ycnt:%d time:%dus\n",
			 __func__, seq, line, ycnt, time / 1000);
		bridge_frame_end(br, FRAME_WORK);
	}
end:
	return ret;
}

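/*
 * Enable the MPFBC compressed output: in quick mode chain the ISP to the
 * ISPP directly and use ping-pong buffers when a second buffer is queued.
 */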
static int config_mpfbc(struct rkisp_bridge_device *dev)
{
	struct rkisp_hw_dev *hw = dev->ispdev->hw_dev;
	u32 h = hw->max_in.h ? hw->max_in.h : dev->crop.height;
	u32 ctrl = 0;

	if (dev->work_mode & ISP_ISPP_QUICK) {
		rkisp_set_bits(dev->ispdev, CTRL_SWS_CFG,
			       0, SW_ISP2PP_PIPE_EN, true);
		ctrl = SW_MPFBC_MAINISP_MODE;
		if (dev->ispdev->hw_dev->nxt_buf)
			ctrl |= SW_MPFBC_PINGPONG_EN;
	}

	rkisp_write(dev->ispdev, ISP_MPFBC_VIR_WIDTH, 0, true);
	rkisp_write(dev->ispdev, ISP_MPFBC_VIR_HEIGHT, ALIGN(h, 16), true);

	ctrl |= (dev->work_mode & ISP_ISPP_422) | SW_MPFBC_EN;
	rkisp_write(dev->ispdev, ISP_MPFBC_BASE, ctrl, true);
	rkisp_set_bits(dev->ispdev, MI_WR_CTRL, MI_LUM_BURST_MASK,
		       MI_MIPI_LUM_BURST16, true);
	dev->ispdev->irq_ends_mask |= ISP_FRAME_MPFBC;
	return 0;
}

static void disable_mpfbc(struct rkisp_bridge_device *dev)
{
	if (dev->ispdev->hw_dev->is_single)
		rkisp_clear_bits(dev->ispdev, ISP_MPFBC_BASE,
				 SW_MPFBC_EN, true);
}

static bool is_stopped_mpfbc(struct rkisp_bridge_device *dev)
{
	bool en = true;

	if (dev->ispdev->hw_dev->is_single)
		en = is_mpfbc_stopped(dev->ispdev->base_addr);
	return en;
}

static struct rkisp_bridge_ops mpfbc_ops = {
	.config = config_mpfbc,
	.disable = disable_mpfbc,
	.is_stopped = is_stopped_mpfbc,
	.frame_end = bridge_frame_end,
	.update_mi = bridge_update_mi,
	.start = bridge_start,
	.stop = bridge_stop,
};

static struct rkisp_bridge_config mpfbc_cfg = {
	.frame_end_id = MI_MPFBC_FRAME,
	.reg = {
		.y0_base = ISP_MPFBC_HEAD_PTR,
		.uv0_base = ISP_MPFBC_PAYL_PTR,
		.y1_base = ISP_MPFBC_HEAD_PTR2,
		.uv1_base = ISP_MPFBC_PAYL_PTR2,
		.g0_base = MI_GAIN_WR_BASE,
		.g1_base = MI_GAIN_WR_BASE2,

		.y0_base_shd = ISP_MPFBC_HEAD_PTR,
		.uv0_base_shd = ISP_MPFBC_PAYL_PTR,
		.g0_base_shd = MI_GAIN_WR_BASE_SHD,
	},
};

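/*
 * Enable the uncompressed main path output (semi-planar YUV), sized from the
 * bridge crop, as the alternative to MPFBC.
 */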
static int config_mp(struct rkisp_bridge_device *dev)
{
	u32 w = dev->crop.width;
	u32 h = dev->crop.height;
	u32 val;

	if (dev->work_mode & ISP_ISPP_QUICK) {
		rkisp_set_bits(dev->ispdev, CTRL_SWS_CFG, 0,
			       SW_ISP2PP_PIPE_EN, true);
		if (dev->ispdev->hw_dev->nxt_buf)
			rkisp_set_bits(dev->ispdev, CIF_MI_CTRL, 0,
				       CIF_MI_MP_PINGPONG_ENABLE, true);
	}

	val = w * h;
	rkisp_write(dev->ispdev, CIF_MI_MP_Y_SIZE_INIT, val, false);
	val = (dev->work_mode & ISP_ISPP_422) ? val : val / 2;
	rkisp_write(dev->ispdev, CIF_MI_MP_CB_SIZE_INIT, val, false);
	rkisp_write(dev->ispdev, CIF_MI_MP_CR_SIZE_INIT, 0, false);
	rkisp_write(dev->ispdev, CIF_MI_MP_Y_OFFS_CNT_INIT, 0, false);
	rkisp_write(dev->ispdev, CIF_MI_MP_CB_OFFS_CNT_INIT, 0, false);
	rkisp_write(dev->ispdev, CIF_MI_MP_CR_OFFS_CNT_INIT, 0, false);

	rkisp_write(dev->ispdev, ISP_MPFBC_BASE,
		    dev->work_mode & ISP_ISPP_422, true);
	rkisp_set_bits(dev->ispdev, CIF_MI_CTRL, MI_CTRL_MP_FMT_MASK,
		       MI_CTRL_MP_WRITE_YUV_SPLA | CIF_MI_CTRL_MP_ENABLE |
		       CIF_MI_MP_AUTOUPDATE_ENABLE, true);
	dev->ispdev->irq_ends_mask |= ISP_FRAME_MP;
	return 0;
}

static void disable_mp(struct rkisp_bridge_device *dev)
{
	if (dev->ispdev->hw_dev->is_single)
		rkisp_clear_bits(dev->ispdev, CIF_MI_CTRL,
				 CIF_MI_CTRL_MP_ENABLE |
				 CIF_MI_CTRL_RAW_ENABLE, true);
}

static bool is_stopped_mp(struct rkisp_bridge_device *dev)
{
	bool en = true;

	if (dev->ispdev->hw_dev->is_single)
		en = mp_is_stream_stopped(dev->ispdev->base_addr);
	return en;
}

static struct rkisp_bridge_ops mp_ops = {
	.config = config_mp,
	.disable = disable_mp,
	.is_stopped = is_stopped_mp,
	.frame_end = bridge_frame_end,
	.update_mi = bridge_update_mi,
	.start = bridge_start,
	.stop = bridge_stop,
};

static struct rkisp_bridge_config mp_cfg = {
	.frame_end_id = MI_MP_FRAME,
	.reg = {
		.y0_base = MI_MP_WR_Y_BASE,
		.uv0_base = MI_MP_WR_CB_BASE,
		.y1_base = MI_MP_WR_Y_BASE2,
		.uv1_base = MI_MP_WR_CB_BASE2,
		.g0_base = MI_GAIN_WR_BASE,
		.g1_base = MI_GAIN_WR_BASE2,

		.y0_base_shd = MI_MP_WR_Y_BASE_SHD,
		.uv0_base_shd = MI_MP_WR_CB_BASE_SHD,
		.g0_base_shd = MI_GAIN_WR_BASE_SHD,
	},
};

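/*
 * Export the shared bridge buffers (picture and gain planes) as dma-buf file
 * descriptors for userspace; fails with -EAGAIN until the buffer pool has
 * been initialised.
 */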
int rkisp_bridge_get_fbcbuf_fd(struct rkisp_device *dev, struct isp2x_buf_idxfd *idxfd)
{
	struct rkisp_hw_dev *hw = dev->hw_dev;
	struct rkisp_bridge_device *br_dev = &dev->br_dev;
	struct rkisp_bridge_buf *buf;
	struct rkisp_dummy_buffer *dummy;
	unsigned long lock_flags = 0;
	int i, j, buf_idx;

	spin_lock_irqsave(&hw->buf_lock, lock_flags);
	if (!hw->is_buf_init) {
		spin_unlock_irqrestore(&hw->buf_lock, lock_flags);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&hw->buf_lock, lock_flags);

	buf_idx = 0;
	for (i = 0; i < br_dev->buf_num; i++) {
		buf = &hw->bufs[i];
		for (j = 0; j < GROUP_BUF_MAX; j++) {
			dummy = &buf->dummy[j];
			buf->dbufs.dfd[j] = dma_buf_fd(dummy->dbuf, O_CLOEXEC);
			get_dma_buf(buf->dbufs.dbuf[j]);
			idxfd->index[buf_idx] = buf->dbufs.didx[j];
			idxfd->dmafd[buf_idx] = buf->dbufs.dfd[j];
			buf_idx++;
		}
	}

	idxfd->buf_num = buf_idx;

	return 0;
}

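/*
 * Release a reported buffer back to the ISPP: find it on the report list by
 * device and buffer index, flush older buffers of the same device first,
 * sanity-check the picture/gain timestamps and pass each buffer on via
 * s_rx_buffer.
 */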
void rkisp_bridge_sendtopp_buffer(struct rkisp_device *dev, u32 dev_id, u32 buf_idx)
{
	struct rkisp_hw_dev *hw = dev->hw_dev;
	struct rkisp_bridge_device *br_dev = &dev->br_dev;
	struct v4l2_subdev *sd = v4l2_get_subdev_hostdata(&br_dev->sd);
	struct rkisp_ispp_buf *cur_buf, *cur_buf_tmp, *find_buf;
	struct rkisp_bridge_buf *buf;
	unsigned long lock_flags = 0;
	bool find_flg = false;
	u32 *vaddr, size;
	u64 pic_ts, gain_ts;

	spin_lock_irqsave(&hw->buf_lock, lock_flags);
	list_for_each_entry(cur_buf, &hw->rpt_list, list) {
		if (cur_buf->index == dev_id && cur_buf->buf_idx == buf_idx) {
			find_flg = true;
			break;
		}
	}

	if (find_flg) {
		list_del(&cur_buf->list);
		find_buf = cur_buf;
		list_for_each_entry_safe(cur_buf, cur_buf_tmp, &hw->rpt_list, list) {
			if ((cur_buf->frame_id < find_buf->frame_id) &&
			    (cur_buf->index == find_buf->index)) {
				list_del_init(&cur_buf->list);
				v4l2_dbg(3, rkisp_debug, &br_dev->sd,
					 "%s send buffer to pp, frame_id %d\n",
					 __func__, cur_buf->frame_id);

				buf = to_bridge_buf(cur_buf);
				rkisp_prepare_buffer(dev, &buf->dummy[GROUP_BUF_GAIN]);
				vaddr = buf->dummy[GROUP_BUF_PIC].vaddr;
				size = buf->dummy[GROUP_BUF_PIC].size;
				pic_ts = *(u64 *)(vaddr + size / 4 - 2);

				vaddr = buf->dummy[GROUP_BUF_GAIN].vaddr;
				size = buf->dummy[GROUP_BUF_GAIN].size;
				gain_ts = *(u64 *)(vaddr + size / 4 - 2);
				if (abs(pic_ts - gain_ts) > 5000000LL) {
					v4l2_info(&br_dev->sd,
						  "%s: frame %d, timestamps do not match (pic_ts %lld, gain_ts %lld)\n",
						  __func__, cur_buf->frame_id, pic_ts, gain_ts);
				}
				cur_buf->is_move_judge = true;
				v4l2_subdev_call(sd, video, s_rx_buffer, cur_buf, NULL);
			}
		}

		v4l2_dbg(3, rkisp_debug, &br_dev->sd,
			 "%s send buffer to pp, frame_id %d\n",
			 __func__, find_buf->frame_id);

		buf = to_bridge_buf(find_buf);
		rkisp_prepare_buffer(dev, &buf->dummy[GROUP_BUF_GAIN]);
		vaddr = buf->dummy[GROUP_BUF_PIC].vaddr;
		size = buf->dummy[GROUP_BUF_PIC].size;
		pic_ts = *(u64 *)(vaddr + size / 4 - 2);

		vaddr = buf->dummy[GROUP_BUF_GAIN].vaddr;
		size = buf->dummy[GROUP_BUF_GAIN].size;
		gain_ts = *(u64 *)(vaddr + size / 4 - 2);
		if (abs(pic_ts - gain_ts) > 5000000LL) {
			v4l2_info(&br_dev->sd,
				  "%s: frame %d, timestamps do not match (pic_ts %lld, gain_ts %lld)\n",
				  __func__, find_buf->frame_id, pic_ts, gain_ts);
		}
		find_buf->is_move_judge = true;
		v4l2_subdev_call(sd, video, s_rx_buffer, find_buf, NULL);
	}
	spin_unlock_irqrestore(&hw->buf_lock, lock_flags);
}

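/*
 * Latch a finished SP buffer for pairing with the MFBC/gain buffer; a stale
 * buffer is requeued and counted as frame loss.
 */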
void rkisp_bridge_save_spbuf(struct rkisp_device *dev, struct rkisp_buffer *sp_buf)
{
	struct rkisp_hw_dev *hw = dev->hw_dev;
	struct rkisp_bridge_device *br_dev = &dev->br_dev;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(&hw->buf_lock, lock_flags);
	if (dev->cur_spbuf) {
		v4l2_dbg(1, rkisp_debug, &br_dev->sd,
			 "%s old sp buf still exists, frame_id %d\n",
			 __func__, dev->cur_spbuf->vb.sequence);
		rkisp_spbuf_queue(&dev->cap_dev.stream[RKISP_STREAM_SP], dev->cur_spbuf);
		dev->cur_spbuf = NULL;
		dev->cap_dev.stream[RKISP_STREAM_SP].dbg.frameloss++;
	}
	dev->cur_spbuf = sp_buf;
	rkisp_bridge_try_sendtohal(dev);
	spin_unlock_irqrestore(&hw->buf_lock, lock_flags);
}

void rkisp_bridge_stop_spstream(struct rkisp_device *dev)
{
	struct rkisp_hw_dev *hw = dev->hw_dev;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(&hw->buf_lock, lock_flags);
	if (dev->cur_spbuf) {
		rkisp_spbuf_queue(&dev->cap_dev.stream[RKISP_STREAM_SP], dev->cur_spbuf);
		dev->cur_spbuf = NULL;
	}
	spin_unlock_irqrestore(&hw->buf_lock, lock_flags);
}

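/*
 * Select the MPFBC or plain MP backend according to the negotiated work mode
 * and hook up the early frame-done timer callback.
 */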
void rkisp_bridge_init_ops_v20(struct rkisp_bridge_device *dev)
{
	if (dev->work_mode & ISP_ISPP_FBC) {
		dev->ops = &mpfbc_ops;
		dev->cfg = &mpfbc_cfg;
	} else {
		dev->ops = &mp_ops;
		dev->cfg = &mp_cfg;
	}
	dev->frame_qst.function = rkisp_bridge_frame_done_early;
}