/*
 * Rockchip isp1 driver
 *
 * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kfifo.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>	/* for ISP statistics */
#include "dev.h"
#include "regs.h"

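/*
 * Statistics readout overview:
 *
 * The ISP interrupt handler (rkisp1_stats_isr) does not read the 3A
 * measurement registers directly.  It acknowledges the interrupts,
 * pushes a small readout work item into rd_kfifo and schedules
 * rd_tasklet.  The tasklet (rkisp1_stats_readout_task) then reads the
 * AWB/AEC/AF/histogram/BLS registers and copies them into the next
 * vb2 META_CAPTURE buffer queued by userspace, so the register reads
 * happen outside hard-IRQ context.
 */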
#define RKISP1_ISP_STATS_REQ_BUFS_MIN 2
#define RKISP1_ISP_STATS_REQ_BUFS_MAX 8

static int rkisp1_stats_enum_fmt_meta_cap(struct file *file, void *priv,
					  struct v4l2_fmtdesc *f)
{
	struct video_device *video = video_devdata(file);
	struct rkisp1_isp_stats_vdev *stats_vdev = video_get_drvdata(video);

	if (f->index > 0 || f->type != video->queue->type)
		return -EINVAL;

	f->pixelformat = stats_vdev->vdev_fmt.fmt.meta.dataformat;
	return 0;
}

static int rkisp1_stats_g_fmt_meta_cap(struct file *file, void *priv,
				       struct v4l2_format *f)
{
	struct video_device *video = video_devdata(file);
	struct rkisp1_isp_stats_vdev *stats_vdev = video_get_drvdata(video);
	struct v4l2_meta_format *meta = &f->fmt.meta;

	if (f->type != video->queue->type)
		return -EINVAL;

	memset(meta, 0, sizeof(*meta));
	meta->dataformat = stats_vdev->vdev_fmt.fmt.meta.dataformat;
	meta->buffersize = stats_vdev->vdev_fmt.fmt.meta.buffersize;

	return 0;
}

static int rkisp1_stats_querycap(struct file *file,
				 void *priv, struct v4l2_capability *cap)
{
	struct video_device *vdev = video_devdata(file);
	struct rkisp1_isp_stats_vdev *stats_vdev = video_get_drvdata(vdev);

	snprintf(cap->driver, sizeof(cap->driver),
		 "%s_v%d", DRIVER_NAME,
		 stats_vdev->dev->isp_ver >> 4);
	strlcpy(cap->card, vdev->name, sizeof(cap->card));
	strlcpy(cap->bus_info, "platform: " DRIVER_NAME, sizeof(cap->bus_info));

	return 0;
}

/* ISP video device IOCTLs */
static const struct v4l2_ioctl_ops rkisp1_stats_ioctl = {
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
	.vidioc_enum_fmt_meta_cap = rkisp1_stats_enum_fmt_meta_cap,
	.vidioc_g_fmt_meta_cap = rkisp1_stats_g_fmt_meta_cap,
	.vidioc_s_fmt_meta_cap = rkisp1_stats_g_fmt_meta_cap,
	.vidioc_try_fmt_meta_cap = rkisp1_stats_g_fmt_meta_cap,
	.vidioc_querycap = rkisp1_stats_querycap
};

struct v4l2_file_operations rkisp1_stats_fops = {
	.mmap = vb2_fop_mmap,
	.unlocked_ioctl = video_ioctl2,
	.poll = vb2_fop_poll,
	.open = v4l2_fh_open,
	.release = vb2_fop_release
};

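/*
 * Each statistics buffer is a single vmalloc'ed plane large enough to
 * hold one struct rkisp1_stat_buffer; the buffer count requested by
 * userspace is clamped to [RKISP1_ISP_STATS_REQ_BUFS_MIN,
 * RKISP1_ISP_STATS_REQ_BUFS_MAX].
 */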
static int rkisp1_stats_vb2_queue_setup(struct vb2_queue *vq,
					unsigned int *num_buffers,
					unsigned int *num_planes,
					unsigned int sizes[],
					struct device *alloc_ctxs[])
{
	struct rkisp1_isp_stats_vdev *stats_vdev = vq->drv_priv;

	*num_planes = 1;

	*num_buffers = clamp_t(u32, *num_buffers, RKISP1_ISP_STATS_REQ_BUFS_MIN,
			       RKISP1_ISP_STATS_REQ_BUFS_MAX);

	sizes[0] = sizeof(struct rkisp1_stat_buffer);

	INIT_LIST_HEAD(&stats_vdev->stat);

	return 0;
}

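/*
 * Cache the plane's kernel mapping and park the buffer on the stat
 * list; the readout tasklet picks buffers from this list under
 * rd_lock.
 */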
static void rkisp1_stats_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct rkisp1_buffer *stats_buf = to_rkisp1_buffer(vbuf);
	struct vb2_queue *vq = vb->vb2_queue;
	struct rkisp1_isp_stats_vdev *stats_dev = vq->drv_priv;

	stats_buf->vaddr[0] = vb2_plane_vaddr(vb, 0);

	spin_lock_bh(&stats_dev->rd_lock);
	list_add_tail(&stats_buf->queue, &stats_dev->stat);
	spin_unlock_bh(&stats_dev->rd_lock);
}

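/*
 * Stop path: mark the device as no longer streaming (under irq_lock so
 * the ISR stops queueing new work), disable the readout tasklet and
 * return all still-queued buffers to vb2 with VB2_BUF_STATE_ERROR.
 */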
static void rkisp1_stats_vb2_stop_streaming(struct vb2_queue *vq)
{
	struct rkisp1_isp_stats_vdev *stats_vdev = vq->drv_priv;
	struct rkisp1_buffer *buf;
	unsigned long flags;
	int i;

	/* Make sure the isr queues no new readout work before draining */
	spin_lock_irqsave(&stats_vdev->irq_lock, flags);
	stats_vdev->streamon = false;
	spin_unlock_irqrestore(&stats_vdev->irq_lock, flags);

	tasklet_disable(&stats_vdev->rd_tasklet);

	spin_lock_bh(&stats_vdev->rd_lock);
	for (i = 0; i < RKISP1_ISP_STATS_REQ_BUFS_MAX; i++) {
		if (list_empty(&stats_vdev->stat))
			break;
		buf = list_first_entry(&stats_vdev->stat,
				       struct rkisp1_buffer, queue);
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_bh(&stats_vdev->rd_lock);
}

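/*
 * Re-arm the readout path: discard any stale work items left in the
 * kfifo and re-enable the tasklet that was disabled at stop/init time.
 */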
static int
rkisp1_stats_vb2_start_streaming(struct vb2_queue *queue,
				 unsigned int count)
{
	struct rkisp1_isp_stats_vdev *stats_vdev = queue->drv_priv;

	stats_vdev->streamon = true;
	kfifo_reset(&stats_vdev->rd_kfifo);
	tasklet_enable(&stats_vdev->rd_tasklet);

	return 0;
}

static const struct vb2_ops rkisp1_stats_vb2_ops = {
	.queue_setup = rkisp1_stats_vb2_queue_setup,
	.buf_queue = rkisp1_stats_vb2_buf_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.stop_streaming = rkisp1_stats_vb2_stop_streaming,
	.start_streaming = rkisp1_stats_vb2_start_streaming,
};

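/*
 * The statistics node is a V4L2 metadata capture queue backed by
 * vmalloc memory; buffers are filled by the CPU in the readout
 * tasklet, not by DMA.
 */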
static int rkisp1_stats_init_vb2_queue(struct vb2_queue *q,
				       struct rkisp1_isp_stats_vdev *stats_vdev)
{
	q->type = V4L2_BUF_TYPE_META_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR;
	q->drv_priv = stats_vdev;
	q->ops = &rkisp1_stats_vb2_ops;
	q->mem_ops = &vb2_vmalloc_memops;
	q->buf_struct_size = sizeof(struct rkisp1_buffer);
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = &stats_vdev->dev->iqlock;
	q->dev = stats_vdev->dev->dev;

	return vb2_queue_init(q);
}

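/*
 * AWB readout: the hardware exposes the white-pixel count and the mean
 * Cr/R, Cb/B and Y/G values of the measurement window; only the
 * register offsets differ between the V10 and V12 blocks.
 */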
static void rkisp1_stats_get_awb_meas_v10(struct rkisp1_isp_stats_vdev *stats_vdev,
					  struct rkisp1_stat_buffer *pbuf)
{
	/* Protect against concurrent access from ISR? */
	u32 reg_val;

	pbuf->meas_type |= CIFISP_STAT_AWB;
	reg_val = readl(stats_vdev->dev->base_addr + CIF_ISP_AWB_WHITE_CNT_V10);
	pbuf->params.awb.awb_mean[0].cnt = CIF_ISP_AWB_GET_PIXEL_CNT(reg_val);
	reg_val = readl(stats_vdev->dev->base_addr + CIF_ISP_AWB_MEAN_V10);

	pbuf->params.awb.awb_mean[0].mean_cr_or_r =
		CIF_ISP_AWB_GET_MEAN_CR_R(reg_val);
	pbuf->params.awb.awb_mean[0].mean_cb_or_b =
		CIF_ISP_AWB_GET_MEAN_CB_B(reg_val);
	pbuf->params.awb.awb_mean[0].mean_y_or_g =
		CIF_ISP_AWB_GET_MEAN_Y_G(reg_val);
}

static void rkisp1_stats_get_awb_meas_v12(struct rkisp1_isp_stats_vdev *stats_vdev,
					  struct rkisp1_stat_buffer *pbuf)
{
	/* Protect against concurrent access from ISR? */
	u32 reg_val;

	pbuf->meas_type |= CIFISP_STAT_AWB;
	reg_val = readl(stats_vdev->dev->base_addr + CIF_ISP_AWB_WHITE_CNT_V12);
	pbuf->params.awb.awb_mean[0].cnt = CIF_ISP_AWB_GET_PIXEL_CNT(reg_val);
	reg_val = readl(stats_vdev->dev->base_addr + CIF_ISP_AWB_MEAN_V12);

	pbuf->params.awb.awb_mean[0].mean_cr_or_r =
		CIF_ISP_AWB_GET_MEAN_CR_R(reg_val);
	pbuf->params.awb.awb_mean[0].mean_cb_or_b =
		CIF_ISP_AWB_GET_MEAN_CB_B(reg_val);
	pbuf->params.awb.awb_mean[0].mean_y_or_g =
		CIF_ISP_AWB_GET_MEAN_Y_G(reg_val);
}

static void rkisp1_stats_get_aec_meas_v10(struct rkisp1_isp_stats_vdev *stats_vdev,
					  struct rkisp1_stat_buffer *pbuf)
{
	unsigned int i;
	void __iomem *addr = stats_vdev->dev->base_addr + CIF_ISP_EXP_MEAN_00_V10;

	pbuf->meas_type |= CIFISP_STAT_AUTOEXP;
	for (i = 0; i < stats_vdev->config->ae_mean_max; i++)
		pbuf->params.ae.exp_mean[i] = (u8)readl(addr + i * 4);
}

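/*
 * On V12 the exposure means are packed four 8-bit values per 32-bit
 * register.  With ae_mean_max = 81 the loop below covers 80 means in
 * 20 registers and the trailing read picks up the single remaining
 * mean from the next register.
 */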
static void rkisp1_stats_get_aec_meas_v12(struct rkisp1_isp_stats_vdev *stats_vdev,
					  struct rkisp1_stat_buffer *pbuf)
{
	int i;
	void __iomem *addr = stats_vdev->dev->base_addr + CIF_ISP_EXP_MEAN_V12;
	u32 value;

	pbuf->meas_type |= CIFISP_STAT_AUTOEXP;
	for (i = 0; i < stats_vdev->config->ae_mean_max / 4; i++) {
		value = readl(addr + i * 4);
		pbuf->params.ae.exp_mean[4 * i + 0] = CIF_ISP_EXP_GET_MEAN_xy0_V12(value);
		pbuf->params.ae.exp_mean[4 * i + 1] = CIF_ISP_EXP_GET_MEAN_xy1_V12(value);
		pbuf->params.ae.exp_mean[4 * i + 2] = CIF_ISP_EXP_GET_MEAN_xy2_V12(value);
		pbuf->params.ae.exp_mean[4 * i + 3] = CIF_ISP_EXP_GET_MEAN_xy3_V12(value);
	}
	value = readl(addr + i * 4);
	pbuf->params.ae.exp_mean[4 * i + 0] = CIF_ISP_EXP_GET_MEAN_xy0_V12(value);
}

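/* AF readout: sharpness sum and luminance for the three AF windows. */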
static void rkisp1_stats_get_afc_meas(struct rkisp1_isp_stats_vdev *stats_vdev,
				      struct rkisp1_stat_buffer *pbuf)
{
	void __iomem *base_addr;
	struct cifisp_af_stat *af;

	pbuf->meas_type |= CIFISP_STAT_AFM_FIN;

	af = &pbuf->params.af;
	base_addr = stats_vdev->dev->base_addr;
	af->window[0].sum = readl(base_addr + CIF_ISP_AFM_SUM_A);
	af->window[0].lum = readl(base_addr + CIF_ISP_AFM_LUM_A);
	af->window[1].sum = readl(base_addr + CIF_ISP_AFM_SUM_B);
	af->window[1].lum = readl(base_addr + CIF_ISP_AFM_LUM_B);
	af->window[2].sum = readl(base_addr + CIF_ISP_AFM_SUM_C);
	af->window[2].lum = readl(base_addr + CIF_ISP_AFM_LUM_C);
}

static void rkisp1_stats_get_hst_meas_v10(struct rkisp1_isp_stats_vdev *stats_vdev,
					  struct rkisp1_stat_buffer *pbuf)
{
	int i;
	void __iomem *addr = stats_vdev->dev->base_addr + CIF_ISP_HIST_BIN_0_V10;

	pbuf->meas_type |= CIFISP_STAT_HIST;
	for (i = 0; i < stats_vdev->config->hist_bin_n_max; i++)
		pbuf->params.hist.hist_bins[i] = readl(addr + (i * 4));
}

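/* V12 packs two histogram bins per 32-bit register. */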
static void rkisp1_stats_get_hst_meas_v12(struct rkisp1_isp_stats_vdev *stats_vdev,
					  struct rkisp1_stat_buffer *pbuf)
{
	int i;
	void __iomem *addr = stats_vdev->dev->base_addr + CIF_ISP_HIST_BIN_V12;
	u32 value;

	pbuf->meas_type |= CIFISP_STAT_HIST;
	for (i = 0; i < stats_vdev->config->hist_bin_n_max / 2; i++) {
		value = readl(addr + (i * 4));
		pbuf->params.hist.hist_bins[2 * i] = CIF_ISP_HIST_GET_BIN0_V12(value);
		pbuf->params.hist.hist_bins[2 * i + 1] = CIF_ISP_HIST_GET_BIN1_V12(value);
	}
}

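/*
 * The BLS block reports one measured black level per Bayer component
 * in fixed registers A..D; map them to R/Gr/Gb/B according to the
 * Bayer pattern of the current input format.
 */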
static void rkisp1_stats_get_bls_meas(struct rkisp1_isp_stats_vdev *stats_vdev,
				      struct rkisp1_stat_buffer *pbuf)
{
	struct rkisp1_device *dev = stats_vdev->dev;
	const struct ispsd_in_fmt *in_fmt =
			rkisp1_get_ispsd_in_fmt(&dev->isp_sdev);
	void __iomem *base = stats_vdev->dev->base_addr;
	struct cifisp_bls_meas_val *bls_val;

	bls_val = &pbuf->params.ae.bls_val;
	if (in_fmt->bayer_pat == RAW_BGGR) {
		bls_val->meas_b = readl(base + CIF_ISP_BLS_A_MEASURED);
		bls_val->meas_gb = readl(base + CIF_ISP_BLS_B_MEASURED);
		bls_val->meas_gr = readl(base + CIF_ISP_BLS_C_MEASURED);
		bls_val->meas_r = readl(base + CIF_ISP_BLS_D_MEASURED);
	} else if (in_fmt->bayer_pat == RAW_GBRG) {
		bls_val->meas_gb = readl(base + CIF_ISP_BLS_A_MEASURED);
		bls_val->meas_b = readl(base + CIF_ISP_BLS_B_MEASURED);
		bls_val->meas_r = readl(base + CIF_ISP_BLS_C_MEASURED);
		bls_val->meas_gr = readl(base + CIF_ISP_BLS_D_MEASURED);
	} else if (in_fmt->bayer_pat == RAW_GRBG) {
		bls_val->meas_gr = readl(base + CIF_ISP_BLS_A_MEASURED);
		bls_val->meas_r = readl(base + CIF_ISP_BLS_B_MEASURED);
		bls_val->meas_b = readl(base + CIF_ISP_BLS_C_MEASURED);
		bls_val->meas_gb = readl(base + CIF_ISP_BLS_D_MEASURED);
	} else if (in_fmt->bayer_pat == RAW_RGGB) {
		bls_val->meas_r = readl(base + CIF_ISP_BLS_A_MEASURED);
		bls_val->meas_gr = readl(base + CIF_ISP_BLS_B_MEASURED);
		bls_val->meas_gb = readl(base + CIF_ISP_BLS_C_MEASURED);
		bls_val->meas_b = readl(base + CIF_ISP_BLS_D_MEASURED);
	}
}

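/*
 * Copy MIPI embedded data captured for this frame out of the per-frame
 * kfifo.  Each record is a packet header (payload length in bits 19:8)
 * followed by the payload bytes.
 */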
static void rkisp1_stats_get_emb_data(struct rkisp1_isp_stats_vdev *stats_vdev,
				      struct rkisp1_stat_buffer *pbuf)
{
	unsigned int i;
	struct rkisp1_device *dev = stats_vdev->dev;
	unsigned int ph = 0, out = 0, packet_len = 0, payload_len = 0;
	unsigned int mipi_kfifo_len;
	unsigned int idx;
	unsigned char *fifo_data;

	idx = RKISP1_EMDDATA_FIFO_MAX;
	for (i = 0; i < RKISP1_EMDDATA_FIFO_MAX; i++) {
		if (dev->emd_data_fifo[i].frame_id == pbuf->frame_id) {
			idx = i;
			break;
		}
	}

	if (idx == RKISP1_EMDDATA_FIFO_MAX)
		return;

	if (kfifo_is_empty(&dev->emd_data_fifo[idx].mipi_kfifo))
		return;

	mipi_kfifo_len = dev->emd_data_fifo[idx].data_len;
	fifo_data = &pbuf->params.emd.data[0];
	for (i = 0; i < mipi_kfifo_len;) {
		/* handle the packet header */
		out = kfifo_out(&dev->emd_data_fifo[idx].mipi_kfifo,
				&ph, sizeof(ph));
		if (!out)
			break;
		packet_len = (ph >> 8) & 0xfff;
		i += sizeof(ph);

		/* handle the packet payload */
		out = kfifo_out(&dev->emd_data_fifo[idx].mipi_kfifo,
				fifo_data, packet_len);
		if (!out)
			break;

		i += packet_len;
		payload_len += packet_len;
		fifo_data += packet_len;

		v4l2_dbg(1, rkisp1_debug, &dev->v4l2_dev,
			 "packet_len: 0x%x, ph: 0x%x\n",
			 packet_len, ph);
	}

	pbuf->meas_type |= CIFISP_STAT_EMB_DATA;

	v4l2_dbg(1, rkisp1_debug, &dev->v4l2_dev,
		 "payload_len: %d, pbuf->frame_id %d\n",
		 payload_len, pbuf->frame_id);
}

static struct rkisp1_stats_ops rkisp1_v10_stats_ops = {
	.get_awb_meas = rkisp1_stats_get_awb_meas_v10,
	.get_aec_meas = rkisp1_stats_get_aec_meas_v10,
	.get_afc_meas = rkisp1_stats_get_afc_meas,
	.get_hst_meas = rkisp1_stats_get_hst_meas_v10,
	.get_bls_meas = rkisp1_stats_get_bls_meas,
	.get_emb_data = rkisp1_stats_get_emb_data,
};

static struct rkisp1_stats_ops rkisp1_v12_stats_ops = {
	.get_awb_meas = rkisp1_stats_get_awb_meas_v12,
	.get_aec_meas = rkisp1_stats_get_aec_meas_v12,
	.get_afc_meas = rkisp1_stats_get_afc_meas,
	.get_hst_meas = rkisp1_stats_get_hst_meas_v12,
	.get_bls_meas = rkisp1_stats_get_bls_meas,
};

static struct rkisp1_stats_config rkisp1_v10_stats_config = {
	.ae_mean_max = 25,
	.hist_bin_n_max = 16,
};

static struct rkisp1_stats_config rkisp1_v12_stats_config = {
	.ae_mean_max = 81,
	.hist_bin_n_max = 32,
};

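/*
 * Runs in tasklet context: take one buffer queued by userspace, clear
 * it, fill in whichever measurements the interrupt status flagged as
 * ready and hand the buffer back to vb2 as DONE.
 */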
static void
rkisp1_stats_send_measurement(struct rkisp1_isp_stats_vdev *stats_vdev,
			      struct rkisp1_isp_readout_work *meas_work)
{
	unsigned int cur_frame_id = -1;
	struct rkisp1_stat_buffer *cur_stat_buf;
	struct rkisp1_buffer *cur_buf = NULL;
	struct rkisp1_stats_ops *ops = stats_vdev->ops;

	cur_frame_id = atomic_read(&stats_vdev->dev->isp_sdev.frm_sync_seq) - 1;
	if (cur_frame_id != meas_work->frame_id) {
		v4l2_warn(stats_vdev->vnode.vdev.v4l2_dev,
			  "Measurement late(%d, %d)\n",
			  cur_frame_id, meas_work->frame_id);
		cur_frame_id = meas_work->frame_id;
	}

	spin_lock(&stats_vdev->rd_lock);
	/* get one empty buffer */
	if (!list_empty(&stats_vdev->stat)) {
		cur_buf = list_first_entry(&stats_vdev->stat,
					   struct rkisp1_buffer, queue);
		list_del(&cur_buf->queue);
	}
	spin_unlock(&stats_vdev->rd_lock);

	if (!cur_buf)
		return;

	cur_stat_buf =
		(struct rkisp1_stat_buffer *)(cur_buf->vaddr[0]);
	memset(cur_stat_buf, 0, sizeof(*cur_stat_buf));
	cur_stat_buf->frame_id = cur_frame_id;
	if (meas_work->isp_ris & CIF_ISP_AWB_DONE) {
		ops->get_awb_meas(stats_vdev, cur_stat_buf);
		cur_stat_buf->meas_type |= CIFISP_STAT_AWB;
	}

	if (meas_work->isp_ris & CIF_ISP_AFM_FIN) {
		ops->get_afc_meas(stats_vdev, cur_stat_buf);
		cur_stat_buf->meas_type |= CIFISP_STAT_AFM_FIN;
	}

	if (meas_work->isp_ris & CIF_ISP_EXP_END) {
		ops->get_aec_meas(stats_vdev, cur_stat_buf);
		ops->get_bls_meas(stats_vdev, cur_stat_buf);
		cur_stat_buf->meas_type |= CIFISP_STAT_AUTOEXP;
	}

	if (meas_work->isp_ris & CIF_ISP_HIST_MEASURE_RDY) {
		ops->get_hst_meas(stats_vdev, cur_stat_buf);
		cur_stat_buf->meas_type |= CIFISP_STAT_HIST;
	}

	if ((meas_work->isp_ris & CIF_ISP_FRAME) &&
	    ops->get_emb_data)
		ops->get_emb_data(stats_vdev, cur_stat_buf);

	vb2_set_plane_payload(&cur_buf->vb.vb2_buf, 0,
			      sizeof(struct rkisp1_stat_buffer));
	cur_buf->vb.sequence = cur_frame_id;
	cur_buf->vb.vb2_buf.timestamp = meas_work->timestamp;
	vb2_buffer_done(&cur_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}

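/* Tasklet body: drain the readout kfifo filled by the ISR. */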
static void rkisp1_stats_readout_task(unsigned long data)
{
	unsigned int out = 0;
	struct rkisp1_isp_readout_work work;
	struct rkisp1_isp_stats_vdev *vdev =
		(struct rkisp1_isp_stats_vdev *)data;

	while (!kfifo_is_empty(&vdev->rd_kfifo)) {
		out = kfifo_out(&vdev->rd_kfifo,
				&work, sizeof(work));
		if (!out)
			break;

		if (work.readout == RKISP1_ISP_READOUT_MEAS)
			rkisp1_stats_send_measurement(vdev, &work);
	}
}

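/*
 * Called from the ISP interrupt handler with the raw interrupt status.
 * Acknowledges the 3A interrupts, then defers the actual register
 * readout to the tasklet via rd_kfifo.
 */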
int rkisp1_stats_isr(struct rkisp1_isp_stats_vdev *stats_vdev, u32 isp_ris)
{
	unsigned int isp_mis_tmp = 0;
	struct rkisp1_isp_readout_work work;
	unsigned int cur_frame_id =
		atomic_read(&stats_vdev->dev->isp_sdev.frm_sync_seq) - 1;
#ifdef LOG_ISR_EXE_TIME
	ktime_t in_t = ktime_get();
#endif

	spin_lock(&stats_vdev->irq_lock);

	isp_mis_tmp = isp_ris & (CIF_ISP_AWB_DONE | CIF_ISP_AFM_FIN |
				 CIF_ISP_EXP_END | CIF_ISP_HIST_MEASURE_RDY);
	if (isp_mis_tmp) {
		writel(isp_mis_tmp,
		       stats_vdev->dev->base_addr + CIF_ISP_ICR);

		isp_mis_tmp &= readl(stats_vdev->dev->base_addr + CIF_ISP_MIS);
		if (isp_mis_tmp)
			v4l2_err(stats_vdev->vnode.vdev.v4l2_dev,
				 "isp icr 3A info err: 0x%x 0x%x\n",
				 isp_mis_tmp, isp_ris);
	}

	if (!stats_vdev->streamon)
		goto unlock;

	if (isp_ris & (CIF_ISP_FRAME | CIF_ISP_AWB_DONE |
		       CIF_ISP_AFM_FIN | CIF_ISP_EXP_END |
		       CIF_ISP_HIST_MEASURE_RDY)) {
		work.readout = RKISP1_ISP_READOUT_MEAS;
		work.frame_id = cur_frame_id;
		work.isp_ris = isp_ris;
		work.timestamp = ktime_get_ns();
		if (!kfifo_is_full(&stats_vdev->rd_kfifo))
			kfifo_in(&stats_vdev->rd_kfifo,
				 &work, sizeof(work));
		else
			v4l2_err(stats_vdev->vnode.vdev.v4l2_dev,
				 "stats kfifo is full\n");

		tasklet_schedule(&stats_vdev->rd_tasklet);
	}

#ifdef LOG_ISR_EXE_TIME
	if (isp_ris & (CIF_ISP_EXP_END | CIF_ISP_AWB_DONE |
		       CIF_ISP_FRAME | CIF_ISP_HIST_MEASURE_RDY)) {
		unsigned int diff_us =
			ktime_to_us(ktime_sub(ktime_get(), in_t));

		if (diff_us > g_longest_isr_time)
			g_longest_isr_time = diff_us;

		v4l2_info(stats_vdev->vnode.vdev.v4l2_dev,
			  "isp_isr time %d %d\n", diff_us, g_longest_isr_time);
	}
#endif

unlock:
	spin_unlock(&stats_vdev->irq_lock);

	return 0;
}

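/*
 * Pick the ops/config matching the ISP revision; V13 shares the V12
 * statistics layout.
 */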
static void rkisp1_init_stats_vdev(struct rkisp1_isp_stats_vdev *stats_vdev)
{
	stats_vdev->vdev_fmt.fmt.meta.dataformat =
		V4L2_META_FMT_RK_ISP1_STAT_3A;
	stats_vdev->vdev_fmt.fmt.meta.buffersize =
		sizeof(struct rkisp1_stat_buffer);

	if (stats_vdev->dev->isp_ver == ISP_V12 ||
	    stats_vdev->dev->isp_ver == ISP_V13) {
		stats_vdev->ops = &rkisp1_v12_stats_ops;
		stats_vdev->config = &rkisp1_v12_stats_config;
	} else {
		stats_vdev->ops = &rkisp1_v10_stats_ops;
		stats_vdev->config = &rkisp1_v10_stats_config;
	}
}

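/*
 * Register the statistics video node: set up the vb2 queue and media
 * pad, register the video device and allocate the ISR->tasklet kfifo.
 * The error path unwinds in reverse order.
 */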
int rkisp1_register_stats_vdev(struct rkisp1_isp_stats_vdev *stats_vdev,
			       struct v4l2_device *v4l2_dev,
			       struct rkisp1_device *dev)
{
	int ret;
	struct rkisp1_vdev_node *node = &stats_vdev->vnode;
	struct video_device *vdev = &node->vdev;

	stats_vdev->dev = dev;
	INIT_LIST_HEAD(&stats_vdev->stat);
	spin_lock_init(&stats_vdev->irq_lock);
	spin_lock_init(&stats_vdev->rd_lock);

	strlcpy(vdev->name, "rkisp1-statistics", sizeof(vdev->name));

	video_set_drvdata(vdev, stats_vdev);
	vdev->ioctl_ops = &rkisp1_stats_ioctl;
	vdev->fops = &rkisp1_stats_fops;
	vdev->release = video_device_release_empty;
	vdev->lock = &dev->iqlock;
	vdev->v4l2_dev = v4l2_dev;
	vdev->queue = &node->buf_queue;
	vdev->device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;
	vdev->vfl_dir = VFL_DIR_RX;
	rkisp1_stats_init_vb2_queue(vdev->queue, stats_vdev);
	rkisp1_init_stats_vdev(stats_vdev);

	node->pad.flags = MEDIA_PAD_FL_SINK;
	ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
	if (ret < 0)
		goto err_release_queue;

	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (ret < 0) {
		dev_err(&vdev->dev,
			"could not register Video for Linux device\n");
		goto err_cleanup_media_entity;
	}

	ret = kfifo_alloc(&stats_vdev->rd_kfifo,
			  RKISP1_READOUT_WORK_SIZE,
			  GFP_KERNEL);
	if (ret) {
		dev_err(&vdev->dev,
			"kfifo_alloc failed with error %d\n",
			ret);
		goto err_unregister_video;
	}

	tasklet_init(&stats_vdev->rd_tasklet,
		     rkisp1_stats_readout_task,
		     (unsigned long)stats_vdev);
	tasklet_disable(&stats_vdev->rd_tasklet);

	return 0;

err_unregister_video:
	video_unregister_device(vdev);
err_cleanup_media_entity:
	media_entity_cleanup(&vdev->entity);
err_release_queue:
	vb2_queue_release(vdev->queue);
	return ret;
}

void rkisp1_unregister_stats_vdev(struct rkisp1_isp_stats_vdev *stats_vdev)
{
	struct rkisp1_vdev_node *node = &stats_vdev->vnode;
	struct video_device *vdev = &node->vdev;

	kfifo_free(&stats_vdev->rd_kfifo);
	tasklet_kill(&stats_vdev->rd_tasklet);
	video_unregister_device(vdev);
	media_entity_cleanup(&vdev->entity);
	vb2_queue_release(vdev->queue);
}