1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd. */
3
4 #include <linux/kfifo.h>
5 #include <media/v4l2-common.h>
6 #include <media/v4l2-ioctl.h>
7 #include <media/videobuf2-core.h>
8 #include <media/videobuf2-vmalloc.h> /* for ISP statistics */
9 #include <media/videobuf2-dma-contig.h>
10 #include <media/videobuf2-dma-sg.h>
11 #include <media/v4l2-mc.h>
12 #include <uapi/linux/rk-video-format.h>
13 #include "dev.h"
14 #include "regs.h"
15 #include "stats.h"
16
17 #define RKISPP_STATS_REQ_BUFS_MIN 2
18 #define RKISPP_STATS_REQ_BUFS_MAX 8
19
update_addr(struct rkispp_stats_vdev * stats_vdev)20 static void update_addr(struct rkispp_stats_vdev *stats_vdev)
21 {
22 struct rkispp_dummy_buffer *dummy_buf;
23 u32 addr;
24
25 if (stats_vdev->curr_buf) {
26 addr = stats_vdev->curr_buf->buff_addr[0];
27 rkispp_write(stats_vdev->dev, RKISPP_ORB_WR_BASE, addr);
28 }
29
30 if (!stats_vdev->curr_buf) {
31 dummy_buf = &stats_vdev->dev->hw_dev->dummy_buf;
32 if (!dummy_buf->mem_priv)
33 return;
34
35 rkispp_write(stats_vdev->dev, RKISPP_ORB_WR_BASE, dummy_buf->dma_addr);
36 }
37 }
38
/*
 * Complete the in-flight statistics buffer and arm the next one.
 *
 * If a buffer is in flight (curr_buf), fill its payload according to the
 * node type (TNR gain/gainkg indices or NR ORB data), timestamp it and
 * return it to userspace via vb2. Then dequeue the next pending buffer
 * under irq_lock, and for the NR node reprogram the ORB write address.
 *
 * NOTE(review): curr_buf is read and cleared here without irq_lock while
 * the list manipulation below is locked — presumably safe because queuers
 * only touch the list, never curr_buf; confirm against the ISR and
 * buf_queue paths.
 */
static int rkispp_stats_frame_end(struct rkispp_stats_vdev *stats_vdev)
{
	void __iomem *base = stats_vdev->dev->hw_dev->base_addr;
	struct rkispp_device *dev = stats_vdev->dev;
	struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
	unsigned long lock_flags = 0;

	if (stats_vdev->curr_buf) {
		u32 payload_size = 0;
		u64 ns = ktime_get_ns();
		u32 cur_frame_id = stats_vdev->frame_id;
		struct rkispp_buffer *curr_buf = stats_vdev->curr_buf;
		void *vaddr = vb2_plane_vaddr(&curr_buf->vb.vb2_buf, 0);

		if (stats_vdev->vdev_id == STATS_VDEV_TNR) {
			struct rkispp_stats_tnrbuf *tnrbuf = vaddr;

			payload_size = sizeof(struct rkispp_stats_tnrbuf);
			tnrbuf->frame_id = cur_frame_id;
			/* index -1 marks "no buffer" until a TNR write buffer exists */
			tnrbuf->gain.index = -1;
			tnrbuf->gainkg.index = -1;
			if (vdev->tnr.cur_wr) {
				tnrbuf->gain.index = vdev->tnr.cur_wr->didx[GROUP_BUF_GAIN];
				tnrbuf->gain.size = vdev->tnr.cur_wr->dbuf[GROUP_BUF_GAIN]->size;
				tnrbuf->gainkg.index = vdev->tnr.buf.gain_kg.index;
				tnrbuf->gainkg.size = vdev->tnr.buf.gain_kg.size;
			}
		} else if (stats_vdev->vdev_id == STATS_VDEV_NR) {
			struct rkispp_stats_nrbuf *nrbuf = vaddr;

			payload_size = sizeof(struct rkispp_stats_nrbuf);
			nrbuf->total_num = readl(base + RKISPP_ORB_TOTAL_NUM);
			nrbuf->frame_id = cur_frame_id;
			nrbuf->image.index = -1;
			/* the NR image is exposed only while FEC still-mode is enabled */
			if (vdev->nr.cur_wr &&
			    (dev->stream_vdev.module_ens & ISPP_MODULE_FEC_ST) == ISPP_MODULE_FEC_ST) {
				nrbuf->image.index = vdev->nr.cur_wr->index;
				nrbuf->image.size = vdev->nr.cur_wr->size;
				v4l2_dbg(3, rkispp_debug, &dev->v4l2_dev,
					 "%s frame:%d nr output buf index:%d fd:%d dma:%pad\n",
					 __func__, cur_frame_id,
					 vdev->nr.cur_wr->index,
					 vdev->nr.cur_wr->dma_fd,
					 &vdev->nr.cur_wr->dma_addr);
			}
		}

		curr_buf->vb.vb2_buf.timestamp = ns;
		curr_buf->vb.sequence = cur_frame_id;
		vb2_set_plane_payload(&curr_buf->vb.vb2_buf, 0, payload_size);
		vb2_buffer_done(&curr_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
		stats_vdev->curr_buf = NULL;
	}

	/* pull the next queued buffer, if any, under the irq lock */
	spin_lock_irqsave(&stats_vdev->irq_lock, lock_flags);
	if (!list_empty(&stats_vdev->stat)) {
		stats_vdev->curr_buf = list_first_entry(&stats_vdev->stat,
							struct rkispp_buffer, queue);
		list_del(&stats_vdev->curr_buf->queue);
	}
	spin_unlock_irqrestore(&stats_vdev->irq_lock, lock_flags);

	/* only the NR node owns the ORB write-base register */
	if (stats_vdev->vdev_id == STATS_VDEV_NR)
		update_addr(stats_vdev);
	return 0;
}
105
rkispp_stats_enum_fmt_meta_cap(struct file * file,void * priv,struct v4l2_fmtdesc * f)106 static int rkispp_stats_enum_fmt_meta_cap(struct file *file, void *priv,
107 struct v4l2_fmtdesc *f)
108 {
109 struct video_device *video = video_devdata(file);
110 struct rkispp_stats_vdev *stats_vdev = video_get_drvdata(video);
111
112 if (f->index > 0 || f->type != video->queue->type)
113 return -EINVAL;
114
115 f->pixelformat = stats_vdev->vdev_fmt.fmt.meta.dataformat;
116 return 0;
117 }
118
rkispp_stats_g_fmt_meta_cap(struct file * file,void * priv,struct v4l2_format * f)119 static int rkispp_stats_g_fmt_meta_cap(struct file *file, void *priv,
120 struct v4l2_format *f)
121 {
122 struct video_device *video = video_devdata(file);
123 struct rkispp_stats_vdev *stats_vdev = video_get_drvdata(video);
124 struct v4l2_meta_format *meta = &f->fmt.meta;
125
126 if (f->type != video->queue->type)
127 return -EINVAL;
128
129 memset(meta, 0, sizeof(*meta));
130 meta->dataformat = stats_vdev->vdev_fmt.fmt.meta.dataformat;
131 meta->buffersize = stats_vdev->vdev_fmt.fmt.meta.buffersize;
132
133 return 0;
134 }
135
rkispp_stats_querycap(struct file * file,void * priv,struct v4l2_capability * cap)136 static int rkispp_stats_querycap(struct file *file,
137 void *priv, struct v4l2_capability *cap)
138 {
139 struct video_device *vdev = video_devdata(file);
140 struct rkispp_stats_vdev *stats_vdev = video_get_drvdata(vdev);
141
142 strcpy(cap->driver, DRIVER_NAME);
143 snprintf(cap->driver, sizeof(cap->driver),
144 "%s_v%d", DRIVER_NAME,
145 stats_vdev->dev->ispp_ver >> 4);
146 strlcpy(cap->card, vdev->name, sizeof(cap->card));
147 strlcpy(cap->bus_info, "platform: " DRIVER_NAME, sizeof(cap->bus_info));
148
149 return 0;
150 }
151
rkispp_stats_fh_open(struct file * filp)152 static int rkispp_stats_fh_open(struct file *filp)
153 {
154 struct rkispp_stats_vdev *stats = video_drvdata(filp);
155 struct rkispp_device *isppdev = stats->dev;
156 int ret;
157
158 ret = v4l2_fh_open(filp);
159 if (!ret) {
160 ret = v4l2_pipeline_pm_get(&stats->vnode.vdev.entity);
161 if (ret < 0) {
162 v4l2_err(&isppdev->v4l2_dev,
163 "pipeline power on failed %d\n", ret);
164 vb2_fop_release(filp);
165 }
166 }
167 return ret;
168 }
169
rkispp_stats_fh_release(struct file * filp)170 static int rkispp_stats_fh_release(struct file *filp)
171 {
172 struct rkispp_stats_vdev *stats = video_drvdata(filp);
173 int ret;
174
175 ret = vb2_fop_release(filp);
176 if (!ret)
177 v4l2_pipeline_pm_put(&stats->vnode.vdev.entity);
178 return ret;
179 }
180
181 /* ISPP statistics video device IOCTLs */
static const struct v4l2_ioctl_ops rkispp_stats_ioctl = {
	/* buffer management is delegated entirely to videobuf2 */
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
	.vidioc_enum_fmt_meta_cap = rkispp_stats_enum_fmt_meta_cap,
	.vidioc_g_fmt_meta_cap = rkispp_stats_g_fmt_meta_cap,
	/* the meta format is fixed, so set/try intentionally reuse the g_fmt handler */
	.vidioc_s_fmt_meta_cap = rkispp_stats_g_fmt_meta_cap,
	.vidioc_try_fmt_meta_cap = rkispp_stats_g_fmt_meta_cap,
	.vidioc_querycap = rkispp_stats_querycap
};
198
/*
 * File operations for the statistics video nodes. mmap/poll go straight
 * to vb2; open/release additionally manage the pipeline power reference.
 */
struct v4l2_file_operations rkispp_stats_fops = {
	.mmap = vb2_fop_mmap,
	.unlocked_ioctl = video_ioctl2,
	.poll = vb2_fop_poll,
	.open = rkispp_stats_fh_open,
	.release = rkispp_stats_fh_release,
};
206
rkispp_stats_vb2_queue_setup(struct vb2_queue * vq,unsigned int * num_buffers,unsigned int * num_planes,unsigned int sizes[],struct device * alloc_ctxs[])207 static int rkispp_stats_vb2_queue_setup(struct vb2_queue *vq,
208 unsigned int *num_buffers,
209 unsigned int *num_planes,
210 unsigned int sizes[],
211 struct device *alloc_ctxs[])
212 {
213 struct rkispp_stats_vdev *stats_vdev = vq->drv_priv;
214
215 *num_planes = 1;
216
217 *num_buffers = clamp_t(u32, *num_buffers, RKISPP_STATS_REQ_BUFS_MIN,
218 RKISPP_STATS_REQ_BUFS_MAX);
219
220 switch (stats_vdev->vdev_id) {
221 case STATS_VDEV_TNR:
222 sizes[0] = sizeof(struct rkispp_stats_tnrbuf);
223 break;
224 case STATS_VDEV_NR:
225 default:
226 sizes[0] = sizeof(struct rkispp_stats_nrbuf);
227 break;
228 }
229 INIT_LIST_HEAD(&stats_vdev->stat);
230
231 return 0;
232 }
233
rkispp_stats_vb2_buf_queue(struct vb2_buffer * vb)234 static void rkispp_stats_vb2_buf_queue(struct vb2_buffer *vb)
235 {
236 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
237 struct rkispp_buffer *buf = to_rkispp_buffer(vbuf);
238 struct vb2_queue *vq = vb->vb2_queue;
239 struct rkispp_stats_vdev *stats_dev = vq->drv_priv;
240 unsigned long lock_flags = 0;
241
242 vb2_plane_vaddr(vb, 0);
243 if (stats_dev->dev->hw_dev->is_dma_sg_ops) {
244 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
245
246 buf->buff_addr[0] = sg_dma_address(sgt->sgl);
247 } else {
248 buf->buff_addr[0] = vb2_dma_contig_plane_dma_addr(vb, 0);
249 }
250 spin_lock_irqsave(&stats_dev->irq_lock, lock_flags);
251 list_add_tail(&buf->queue, &stats_dev->stat);
252 spin_unlock_irqrestore(&stats_dev->irq_lock, lock_flags);
253 }
254
destroy_buf_queue(struct rkispp_stats_vdev * stats_vdev,enum vb2_buffer_state state)255 static void destroy_buf_queue(struct rkispp_stats_vdev *stats_vdev,
256 enum vb2_buffer_state state)
257 {
258 struct rkispp_buffer *buf;
259
260 if (stats_vdev->curr_buf) {
261 list_add_tail(&stats_vdev->curr_buf->queue, &stats_vdev->stat);
262 stats_vdev->curr_buf = NULL;
263 }
264 while (!list_empty(&stats_vdev->stat)) {
265 buf = list_first_entry(&stats_vdev->stat,
266 struct rkispp_buffer, queue);
267 list_del(&buf->queue);
268 vb2_buffer_done(&buf->vb.vb2_buf, state);
269 }
270 }
271
rkispp_stats_vb2_stop_streaming(struct vb2_queue * vq)272 static void rkispp_stats_vb2_stop_streaming(struct vb2_queue *vq)
273 {
274 struct rkispp_stats_vdev *stats_vdev = vq->drv_priv;
275 unsigned long flags;
276
277 spin_lock_irqsave(&stats_vdev->irq_lock, flags);
278 stats_vdev->streamon = false;
279 destroy_buf_queue(stats_vdev, VB2_BUF_STATE_ERROR);
280 spin_unlock_irqrestore(&stats_vdev->irq_lock, flags);
281 }
282
283 static int
rkispp_stats_vb2_start_streaming(struct vb2_queue * queue,unsigned int count)284 rkispp_stats_vb2_start_streaming(struct vb2_queue *queue,
285 unsigned int count)
286 {
287 struct rkispp_stats_vdev *stats_vdev = queue->drv_priv;
288 unsigned long flags;
289
290 if (stats_vdev->streamon)
291 return -EBUSY;
292
293 /* config first buf */
294 rkispp_stats_frame_end(stats_vdev);
295
296 spin_lock_irqsave(&stats_vdev->irq_lock, flags);
297 stats_vdev->streamon = true;
298 spin_unlock_irqrestore(&stats_vdev->irq_lock, flags);
299
300 return 0;
301 }
302
303 static struct vb2_ops rkispp_stats_vb2_ops = {
304 .queue_setup = rkispp_stats_vb2_queue_setup,
305 .buf_queue = rkispp_stats_vb2_buf_queue,
306 .wait_prepare = vb2_ops_wait_prepare,
307 .wait_finish = vb2_ops_wait_finish,
308 .stop_streaming = rkispp_stats_vb2_stop_streaming,
309 .start_streaming = rkispp_stats_vb2_start_streaming,
310 };
311
rkispp_stats_init_vb2_queue(struct vb2_queue * q,struct rkispp_stats_vdev * stats_vdev)312 static int rkispp_stats_init_vb2_queue(struct vb2_queue *q,
313 struct rkispp_stats_vdev *stats_vdev)
314 {
315 q->type = V4L2_BUF_TYPE_META_CAPTURE;
316 q->io_modes = VB2_MMAP | VB2_USERPTR;
317 q->drv_priv = stats_vdev;
318 q->ops = &rkispp_stats_vb2_ops;
319 q->mem_ops = stats_vdev->dev->hw_dev->mem_ops;
320 q->buf_struct_size = sizeof(struct rkispp_buffer);
321 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
322 q->lock = &stats_vdev->dev->iqlock;
323 q->dev = stats_vdev->dev->hw_dev->dev;
324 if (stats_vdev->dev->hw_dev->is_dma_contig)
325 q->dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
326 q->gfp_flags = GFP_DMA32;
327 return vb2_queue_init(q);
328 }
329
/*
 * Frame-end interrupt entry point for a statistics node.
 *
 * Bails out if the node is not streaming, otherwise rotates the stats
 * buffers. NOTE(review): streamon is sampled under irq_lock but the lock
 * is dropped before rkispp_stats_frame_end() runs — presumably tolerable
 * because stop_streaming flushes buffers under the same lock; confirm
 * against rkispp_stats_vb2_stop_streaming().
 */
void rkispp_stats_isr(struct rkispp_stats_vdev *stats_vdev)
{
	spin_lock(&stats_vdev->irq_lock);
	if (!stats_vdev->streamon) {
		spin_unlock(&stats_vdev->irq_lock);
		return;
	}
	spin_unlock(&stats_vdev->irq_lock);

	rkispp_stats_frame_end(stats_vdev);
}
341
rkispp_init_stats_vdev(struct rkispp_stats_vdev * stats_vdev)342 static void rkispp_init_stats_vdev(struct rkispp_stats_vdev *stats_vdev)
343 {
344 stats_vdev->vdev_fmt.fmt.meta.dataformat = V4L2_META_FMT_RK_ISPP_STAT;
345 switch (stats_vdev->vdev_id) {
346 case STATS_VDEV_TNR:
347 stats_vdev->vdev_fmt.fmt.meta.buffersize =
348 sizeof(struct rkispp_stats_tnrbuf);
349 break;
350 case STATS_VDEV_NR:
351 default:
352 stats_vdev->vdev_fmt.fmt.meta.buffersize =
353 sizeof(struct rkispp_stats_nrbuf);
354 break;
355 }
356 }
357
rkispp_register_stats_vdev(struct rkispp_device * dev,enum rkispp_statsvdev_id vdev_id)358 static int rkispp_register_stats_vdev(struct rkispp_device *dev,
359 enum rkispp_statsvdev_id vdev_id)
360 {
361 struct rkispp_stats_vdev *stats_vdev = &dev->stats_vdev[vdev_id];
362 struct rkispp_vdev_node *node = &stats_vdev->vnode;
363 struct video_device *vdev = &node->vdev;
364 int ret;
365
366 stats_vdev->dev = dev;
367 stats_vdev->vdev_id = vdev_id;
368 INIT_LIST_HEAD(&stats_vdev->stat);
369 spin_lock_init(&stats_vdev->irq_lock);
370
371 switch (vdev_id) {
372 case STATS_VDEV_TNR:
373 strncpy(vdev->name, "rkispp_tnr_stats", sizeof(vdev->name) - 1);
374 break;
375 case STATS_VDEV_NR:
376 default:
377 strncpy(vdev->name, "rkispp_nr_stats", sizeof(vdev->name) - 1);
378 break;
379 }
380
381 vdev->ioctl_ops = &rkispp_stats_ioctl;
382 vdev->fops = &rkispp_stats_fops;
383 vdev->release = video_device_release_empty;
384 vdev->lock = &dev->iqlock;
385 vdev->v4l2_dev = &dev->v4l2_dev;
386 vdev->queue = &node->buf_queue;
387 vdev->device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;
388 vdev->vfl_dir = VFL_DIR_RX;
389 rkispp_stats_init_vb2_queue(vdev->queue, stats_vdev);
390 rkispp_init_stats_vdev(stats_vdev);
391 video_set_drvdata(vdev, stats_vdev);
392
393 node->pad.flags = MEDIA_PAD_FL_SINK;
394 ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
395 if (ret < 0)
396 goto err_release_queue;
397
398 ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
399 if (ret < 0) {
400 dev_err(&vdev->dev,
401 "could not register Video for Linux device\n");
402 goto err_cleanup_media_entity;
403 }
404
405 return 0;
406
407 err_cleanup_media_entity:
408 media_entity_cleanup(&vdev->entity);
409 err_release_queue:
410 vb2_queue_release(vdev->queue);
411 return ret;
412 }
413
rkispp_unregister_stats_vdev(struct rkispp_device * dev,enum rkispp_statsvdev_id vdev_id)414 static void rkispp_unregister_stats_vdev(struct rkispp_device *dev,
415 enum rkispp_statsvdev_id vdev_id)
416 {
417 struct rkispp_stats_vdev *stats_vdev = &dev->stats_vdev[vdev_id];
418 struct rkispp_vdev_node *node = &stats_vdev->vnode;
419 struct video_device *vdev = &node->vdev;
420
421 video_unregister_device(vdev);
422 media_entity_cleanup(&vdev->entity);
423 vb2_queue_release(vdev->queue);
424 }
425
rkispp_register_stats_vdevs(struct rkispp_device * dev)426 int rkispp_register_stats_vdevs(struct rkispp_device *dev)
427 {
428 int ret = 0;
429
430 if (dev->ispp_ver != ISPP_V10)
431 return 0;
432
433 ret = rkispp_register_stats_vdev(dev, STATS_VDEV_TNR);
434 if (ret)
435 return ret;
436
437 ret = rkispp_register_stats_vdev(dev, STATS_VDEV_NR);
438 if (ret) {
439 rkispp_unregister_stats_vdev(dev, STATS_VDEV_TNR);
440 return ret;
441 }
442
443 return ret;
444 }
445
rkispp_unregister_stats_vdevs(struct rkispp_device * dev)446 void rkispp_unregister_stats_vdevs(struct rkispp_device *dev)
447 {
448 if (dev->ispp_ver != ISPP_V10)
449 return;
450 rkispp_unregister_stats_vdev(dev, STATS_VDEV_TNR);
451 rkispp_unregister_stats_vdev(dev, STATS_VDEV_NR);
452 }
453