1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd. */
3
4 #include <linux/kfifo.h>
5 #include <media/v4l2-common.h>
6 #include <media/v4l2-ioctl.h>
7 #include <media/videobuf2-core.h>
8 #include <media/videobuf2-dma-sg.h>
9 #include <media/videobuf2-vmalloc.h> /* for ISP statistics */
10 #include "dev.h"
11 #include "isp_stats.h"
12 #include "isp_stats_v1x.h"
13 #include "isp_stats_v2x.h"
14 #include "isp_stats_v21.h"
15 #include "isp_stats_v3x.h"
16 #include "isp_stats_v32.h"
17
18 #define STATS_NAME DRIVER_NAME "-statistics"
19 #define RKISP_ISP_STATS_REQ_BUFS_MIN 2
20 #define RKISP_ISP_STATS_REQ_BUFS_MAX 8
21
rkisp_stats_enum_fmt_meta_cap(struct file * file,void * priv,struct v4l2_fmtdesc * f)22 static int rkisp_stats_enum_fmt_meta_cap(struct file *file, void *priv,
23 struct v4l2_fmtdesc *f)
24 {
25 struct video_device *video = video_devdata(file);
26 struct rkisp_isp_stats_vdev *stats_vdev = video_get_drvdata(video);
27
28 if (f->index > 0 || f->type != video->queue->type)
29 return -EINVAL;
30
31 f->pixelformat = stats_vdev->vdev_fmt.fmt.meta.dataformat;
32 return 0;
33 }
34
rkisp_stats_g_fmt_meta_cap(struct file * file,void * priv,struct v4l2_format * f)35 static int rkisp_stats_g_fmt_meta_cap(struct file *file, void *priv,
36 struct v4l2_format *f)
37 {
38 struct video_device *video = video_devdata(file);
39 struct rkisp_isp_stats_vdev *stats_vdev = video_get_drvdata(video);
40 struct v4l2_meta_format *meta = &f->fmt.meta;
41
42 if (f->type != video->queue->type)
43 return -EINVAL;
44
45 memset(meta, 0, sizeof(*meta));
46 meta->dataformat = stats_vdev->vdev_fmt.fmt.meta.dataformat;
47 meta->buffersize = stats_vdev->vdev_fmt.fmt.meta.buffersize;
48
49 return 0;
50 }
51
rkisp_stats_querycap(struct file * file,void * priv,struct v4l2_capability * cap)52 static int rkisp_stats_querycap(struct file *file,
53 void *priv, struct v4l2_capability *cap)
54 {
55 struct video_device *vdev = video_devdata(file);
56 struct rkisp_isp_stats_vdev *stats_vdev = video_get_drvdata(vdev);
57
58 strcpy(cap->driver, DRIVER_NAME);
59 snprintf(cap->driver, sizeof(cap->driver),
60 "%s_v%d", DRIVER_NAME,
61 stats_vdev->dev->isp_ver >> 4);
62 strlcpy(cap->card, vdev->name, sizeof(cap->card));
63 strlcpy(cap->bus_info, "platform: " DRIVER_NAME, sizeof(cap->bus_info));
64 cap->version = RKISP_DRIVER_VERSION;
65 return 0;
66 }
67
/*
 * ISP statistics video device IOCTLs.
 *
 * All buffer management is delegated to the generic vb2 helpers; only
 * the metadata-format and capability queries are driver specific.
 * s_fmt/try_fmt reuse the g_fmt handler because the single supported
 * format is fixed by the driver (see rkisp_stats_g_fmt_meta_cap).
 */
static const struct v4l2_ioctl_ops rkisp_stats_ioctl = {
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
	.vidioc_enum_fmt_meta_cap = rkisp_stats_enum_fmt_meta_cap,
	.vidioc_g_fmt_meta_cap = rkisp_stats_g_fmt_meta_cap,
	.vidioc_s_fmt_meta_cap = rkisp_stats_g_fmt_meta_cap,
	.vidioc_try_fmt_meta_cap = rkisp_stats_g_fmt_meta_cap,
	.vidioc_querycap = rkisp_stats_querycap
};
85
rkisp_stats_fh_open(struct file * filp)86 static int rkisp_stats_fh_open(struct file *filp)
87 {
88 struct rkisp_isp_stats_vdev *stats = video_drvdata(filp);
89 int ret;
90
91 if (!stats->dev->is_probe_end)
92 return -EINVAL;
93
94 ret = v4l2_fh_open(filp);
95 if (!ret) {
96 ret = v4l2_pipeline_pm_get(&stats->vnode.vdev.entity);
97 if (ret < 0)
98 vb2_fop_release(filp);
99 }
100
101 return ret;
102 }
103
rkisp_stats_fop_release(struct file * file)104 static int rkisp_stats_fop_release(struct file *file)
105 {
106 struct rkisp_isp_stats_vdev *stats = video_drvdata(file);
107 int ret;
108
109 ret = vb2_fop_release(file);
110 if (!ret)
111 v4l2_pipeline_pm_put(&stats->vnode.vdev.entity);
112 return ret;
113 }
114
/*
 * File operations for the stats node.  mmap/poll/ioctl go through the
 * generic vb2/v4l2 helpers; open/release additionally manage the
 * pipeline power-management reference.
 */
struct v4l2_file_operations rkisp_stats_fops = {
	.mmap = vb2_fop_mmap,
	.unlocked_ioctl = video_ioctl2,
	.poll = vb2_fop_poll,
	.open = rkisp_stats_fh_open,
	.release = rkisp_stats_fop_release
};
122
rkisp_stats_vb2_queue_setup(struct vb2_queue * vq,unsigned int * num_buffers,unsigned int * num_planes,unsigned int sizes[],struct device * alloc_ctxs[])123 static int rkisp_stats_vb2_queue_setup(struct vb2_queue *vq,
124 unsigned int *num_buffers,
125 unsigned int *num_planes,
126 unsigned int sizes[],
127 struct device *alloc_ctxs[])
128 {
129 struct rkisp_isp_stats_vdev *stats_vdev = vq->drv_priv;
130
131 *num_planes = 1;
132
133 *num_buffers = clamp_t(u32, *num_buffers, RKISP_ISP_STATS_REQ_BUFS_MIN,
134 RKISP_ISP_STATS_REQ_BUFS_MAX);
135
136 sizes[0] = stats_vdev->vdev_fmt.fmt.meta.buffersize;
137 INIT_LIST_HEAD(&stats_vdev->stat);
138
139 return 0;
140 }
141
/*
 * .buf_queue: accept a buffer queued by userspace.
 *
 * The plane-0 mapping is cached in the rkisp_buffer and zeroed, then
 * the buffer is appended to the pending list under rd_lock.  On ISP
 * v32 with is_pre_on set (pre-streaming — presumably "thunder boot",
 * judging by the "tb" log tag; confirm against the platform code), a
 * measurement captured before any buffer was queued may already sit in
 * the internal stats_buf[0]; such a buffer is filled from it and
 * completed immediately instead of being queued.
 */
static void rkisp_stats_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct rkisp_buffer *stats_buf = to_rkisp_buffer(vbuf);
	struct vb2_queue *vq = vb->vb2_queue;
	struct rkisp_isp_stats_vdev *stats_dev = vq->drv_priv;
	u32 size = stats_dev->vdev_fmt.fmt.meta.buffersize;
	unsigned long flags;

	stats_buf->vaddr[0] = vb2_plane_vaddr(vb, 0);
	if (stats_dev->dev->isp_ver == ISP_V32) {
		/* v32 buffers are DMA-mapped: cache the bus address too */
		struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);

		stats_buf->buff_addr[0] = sg_dma_address(sgt->sgl);
	}
	if (stats_buf->vaddr[0])
		memset(stats_buf->vaddr[0], 0, size);
	spin_lock_irqsave(&stats_dev->rd_lock, flags);
	if (stats_dev->dev->isp_ver == ISP_V32 && stats_dev->dev->is_pre_on) {
		struct rkisp32_isp_stat_buffer *buf = stats_dev->stats_buf[0].vaddr;

		/* frame_id == 0 && meas_type != 0: first-frame stats pending */
		if (buf && !buf->frame_id && buf->meas_type && stats_buf->vaddr[0]) {
			dev_info(stats_dev->dev->dev,
				 "tb stat seq:%d meas_type:0x%x\n",
				 buf->frame_id, buf->meas_type);
			memcpy(stats_buf->vaddr[0], buf, sizeof(struct rkisp32_isp_stat_buffer));
			buf->meas_type = 0;	/* consume the pending stats */
			vb2_set_plane_payload(vb, 0, sizeof(struct rkisp32_isp_stat_buffer));
			vbuf->sequence = buf->frame_id;
			/* drop the lock before completing the buffer */
			spin_unlock_irqrestore(&stats_dev->rd_lock, flags);
			vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
			return;
		}
	}
	list_add_tail(&stats_buf->queue, &stats_dev->stat);
	spin_unlock_irqrestore(&stats_dev->rd_lock, flags);
}
179
/*
 * .stop_streaming: quiesce the readout path and return every buffer to
 * vb2 in the ERROR state.
 *
 * streamon is cleared under irq_lock first so the ISR stops queuing new
 * readout work before the tasklet is disabled; then all pending, current
 * and next buffers are completed under rd_lock.
 */
static void rkisp_stats_vb2_stop_streaming(struct vb2_queue *vq)
{
	struct rkisp_isp_stats_vdev *stats_vdev = vq->drv_priv;
	struct rkisp_buffer *buf;
	unsigned long flags;
	int i;

	/* Make sure no new work queued in isr before draining wq */
	spin_lock_irqsave(&stats_vdev->irq_lock, flags);
	stats_vdev->streamon = false;
	spin_unlock_irqrestore(&stats_vdev->irq_lock, flags);

	tasklet_disable(&stats_vdev->rd_tasklet);

	spin_lock_irqsave(&stats_vdev->rd_lock, flags);
	/* bounded drain: at most REQ_BUFS_MAX buffers can be queued */
	for (i = 0; i < RKISP_ISP_STATS_REQ_BUFS_MAX; i++) {
		if (list_empty(&stats_vdev->stat))
			break;
		buf = list_first_entry(&stats_vdev->stat,
				       struct rkisp_buffer, queue);
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	if (stats_vdev->cur_buf) {
		vb2_buffer_done(&stats_vdev->cur_buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		/* cur and nxt may alias the same buffer: complete it once */
		if (stats_vdev->cur_buf == stats_vdev->nxt_buf)
			stats_vdev->nxt_buf = NULL;
		stats_vdev->cur_buf = NULL;
	}
	if (stats_vdev->nxt_buf) {
		vb2_buffer_done(&stats_vdev->nxt_buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		stats_vdev->nxt_buf = NULL;
	}
	spin_unlock_irqrestore(&stats_vdev->rd_lock, flags);

	stats_vdev->ae_meas_done_next = false;
	stats_vdev->af_meas_done_next = false;
}
218
219 static int
rkisp_stats_vb2_start_streaming(struct vb2_queue * queue,unsigned int count)220 rkisp_stats_vb2_start_streaming(struct vb2_queue *queue,
221 unsigned int count)
222 {
223 struct rkisp_isp_stats_vdev *stats_vdev = queue->drv_priv;
224
225 stats_vdev->cur_buf = NULL;
226 stats_vdev->ops->rdbk_enable(stats_vdev, false);
227 stats_vdev->streamon = true;
228 kfifo_reset(&stats_vdev->rd_kfifo);
229 tasklet_enable(&stats_vdev->rd_tasklet);
230
231 return 0;
232 }
233
234 static struct vb2_ops rkisp_stats_vb2_ops = {
235 .queue_setup = rkisp_stats_vb2_queue_setup,
236 .buf_queue = rkisp_stats_vb2_buf_queue,
237 .wait_prepare = vb2_ops_wait_prepare,
238 .wait_finish = vb2_ops_wait_finish,
239 .stop_streaming = rkisp_stats_vb2_stop_streaming,
240 .start_streaming = rkisp_stats_vb2_start_streaming,
241 };
242
rkisp_stats_init_vb2_queue(struct vb2_queue * q,struct rkisp_isp_stats_vdev * stats_vdev)243 static int rkisp_stats_init_vb2_queue(struct vb2_queue *q,
244 struct rkisp_isp_stats_vdev *stats_vdev)
245 {
246 q->type = V4L2_BUF_TYPE_META_CAPTURE;
247 q->io_modes = VB2_MMAP | VB2_USERPTR;
248 q->drv_priv = stats_vdev;
249 q->ops = &rkisp_stats_vb2_ops;
250 if (stats_vdev->dev->isp_ver == ISP_V32) {
251 q->mem_ops = stats_vdev->dev->hw_dev->mem_ops;
252 if (stats_vdev->dev->hw_dev->is_dma_contig)
253 q->dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
254 } else {
255 q->mem_ops = &vb2_vmalloc_memops;
256 }
257 q->buf_struct_size = sizeof(struct rkisp_buffer);
258 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
259 q->lock = &stats_vdev->dev->iqlock;
260 q->dev = stats_vdev->dev->dev;
261 return vb2_queue_init(q);
262 }
263
rkisp_stats_readout_task(unsigned long data)264 static void rkisp_stats_readout_task(unsigned long data)
265 {
266 unsigned int out = 0;
267 struct rkisp_isp_readout_work work;
268 struct rkisp_isp_stats_vdev *vdev =
269 (struct rkisp_isp_stats_vdev *)data;
270
271 while (!kfifo_is_empty(&vdev->rd_kfifo)) {
272 out = kfifo_out(&vdev->rd_kfifo,
273 &work, sizeof(work));
274 if (!out)
275 break;
276
277 if (work.readout == RKISP_ISP_READOUT_MEAS)
278 vdev->ops->send_meas(vdev, &work);
279 }
280 }
281
rkisp_init_stats_vdev(struct rkisp_isp_stats_vdev * stats_vdev)282 static void rkisp_init_stats_vdev(struct rkisp_isp_stats_vdev *stats_vdev)
283 {
284 stats_vdev->rd_buf_idx = 0;
285 stats_vdev->wr_buf_idx = 0;
286 memset(stats_vdev->stats_buf, 0, sizeof(stats_vdev->stats_buf));
287
288 if (stats_vdev->dev->isp_ver <= ISP_V13)
289 rkisp_init_stats_vdev_v1x(stats_vdev);
290 else if (stats_vdev->dev->isp_ver == ISP_V21)
291 rkisp_init_stats_vdev_v21(stats_vdev);
292 else if (stats_vdev->dev->isp_ver == ISP_V20)
293 rkisp_init_stats_vdev_v2x(stats_vdev);
294 else if (stats_vdev->dev->isp_ver == ISP_V30)
295 rkisp_init_stats_vdev_v3x(stats_vdev);
296 else
297 rkisp_init_stats_vdev_v32(stats_vdev);
298 }
299
rkisp_uninit_stats_vdev(struct rkisp_isp_stats_vdev * stats_vdev)300 static void rkisp_uninit_stats_vdev(struct rkisp_isp_stats_vdev *stats_vdev)
301 {
302 if (stats_vdev->dev->isp_ver <= ISP_V13)
303 rkisp_uninit_stats_vdev_v1x(stats_vdev);
304 else if (stats_vdev->dev->isp_ver == ISP_V21)
305 rkisp_uninit_stats_vdev_v21(stats_vdev);
306 else if (stats_vdev->dev->isp_ver == ISP_V20)
307 rkisp_uninit_stats_vdev_v2x(stats_vdev);
308 else if (stats_vdev->dev->isp_ver == ISP_V30)
309 rkisp_uninit_stats_vdev_v3x(stats_vdev);
310 else
311 rkisp_uninit_stats_vdev_v32(stats_vdev);
312 }
313
/* Toggle readback mode via the version-specific ops hook. */
void rkisp_stats_rdbk_enable(struct rkisp_isp_stats_vdev *stats_vdev, bool en)
{
	stats_vdev->ops->rdbk_enable(stats_vdev, en);
}
318
rkisp_stats_first_ddr_config(struct rkisp_isp_stats_vdev * stats_vdev)319 void rkisp_stats_first_ddr_config(struct rkisp_isp_stats_vdev *stats_vdev)
320 {
321 if (stats_vdev->dev->isp_ver == ISP_V20)
322 rkisp_stats_first_ddr_config_v2x(stats_vdev);
323 else if (stats_vdev->dev->isp_ver == ISP_V21)
324 rkisp_stats_first_ddr_config_v21(stats_vdev);
325 else if (stats_vdev->dev->isp_ver == ISP_V30)
326 rkisp_stats_first_ddr_config_v3x(stats_vdev);
327 else if (stats_vdev->dev->isp_ver == ISP_V32)
328 rkisp_stats_first_ddr_config_v32(stats_vdev);
329 }
330
/* Program the next DDR stats buffer; only implemented for ISP v32. */
void rkisp_stats_next_ddr_config(struct rkisp_isp_stats_vdev *stats_vdev)
{
	if (stats_vdev->dev->isp_ver == ISP_V32)
		rkisp_stats_next_ddr_config_v32(stats_vdev);
}
336
/* Interrupt entry: forward raw status bits to the per-version handler. */
void rkisp_stats_isr(struct rkisp_isp_stats_vdev *stats_vdev,
		     u32 isp_ris, u32 isp3a_ris)
{
	stats_vdev->ops->isr_hdl(stats_vdev, isp_ris, isp3a_ris);
}
342
rkisp_register_stats_vdev(struct rkisp_isp_stats_vdev * stats_vdev,struct v4l2_device * v4l2_dev,struct rkisp_device * dev)343 int rkisp_register_stats_vdev(struct rkisp_isp_stats_vdev *stats_vdev,
344 struct v4l2_device *v4l2_dev,
345 struct rkisp_device *dev)
346 {
347 int ret;
348 struct rkisp_vdev_node *node = &stats_vdev->vnode;
349 struct video_device *vdev = &node->vdev;
350 struct media_entity *source, *sink;
351
352 stats_vdev->dev = dev;
353 INIT_LIST_HEAD(&stats_vdev->stat);
354 spin_lock_init(&stats_vdev->irq_lock);
355 spin_lock_init(&stats_vdev->rd_lock);
356
357 strlcpy(vdev->name, STATS_NAME, sizeof(vdev->name));
358
359 vdev->ioctl_ops = &rkisp_stats_ioctl;
360 vdev->fops = &rkisp_stats_fops;
361 vdev->release = video_device_release_empty;
362 vdev->lock = &dev->iqlock;
363 vdev->v4l2_dev = v4l2_dev;
364 vdev->queue = &node->buf_queue;
365 vdev->device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;
366 vdev->vfl_dir = VFL_DIR_RX;
367 rkisp_stats_init_vb2_queue(vdev->queue, stats_vdev);
368 rkisp_init_stats_vdev(stats_vdev);
369 video_set_drvdata(vdev, stats_vdev);
370
371 node->pad.flags = MEDIA_PAD_FL_SINK;
372 ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
373 if (ret < 0)
374 goto err_release_queue;
375
376 ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
377 if (ret < 0) {
378 dev_err(&vdev->dev,
379 "could not register Video for Linux device\n");
380 goto err_cleanup_media_entity;
381 }
382
383 source = &dev->isp_sdev.sd.entity;
384 sink = &stats_vdev->vnode.vdev.entity;
385 ret = media_create_pad_link(source, RKISP_ISP_PAD_SOURCE_STATS,
386 sink, 0, MEDIA_LNK_FL_ENABLED);
387 if (ret < 0)
388 goto err_unregister_video;
389
390 ret = kfifo_alloc(&stats_vdev->rd_kfifo,
391 RKISP_READOUT_WORK_SIZE,
392 GFP_KERNEL);
393 if (ret) {
394 dev_err(&vdev->dev,
395 "kfifo_alloc failed with error %d\n",
396 ret);
397 goto err_unregister_video;
398 }
399
400 tasklet_init(&stats_vdev->rd_tasklet,
401 rkisp_stats_readout_task,
402 (unsigned long)stats_vdev);
403 tasklet_disable(&stats_vdev->rd_tasklet);
404
405 return 0;
406
407 err_unregister_video:
408 video_unregister_device(vdev);
409 err_cleanup_media_entity:
410 media_entity_cleanup(&vdev->entity);
411 err_release_queue:
412 vb2_queue_release(vdev->queue);
413 rkisp_uninit_stats_vdev(stats_vdev);
414 return ret;
415 }
416
rkisp_unregister_stats_vdev(struct rkisp_isp_stats_vdev * stats_vdev)417 void rkisp_unregister_stats_vdev(struct rkisp_isp_stats_vdev *stats_vdev)
418 {
419 struct rkisp_vdev_node *node = &stats_vdev->vnode;
420 struct video_device *vdev = &node->vdev;
421
422 kfifo_free(&stats_vdev->rd_kfifo);
423 tasklet_kill(&stats_vdev->rd_tasklet);
424 video_unregister_device(vdev);
425 media_entity_cleanup(&vdev->entity);
426 vb2_queue_release(vdev->queue);
427 rkisp_uninit_stats_vdev(stats_vdev);
428 }
429
430