// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019 Rockchip Electronics Co., Ltd */

#include <media/videobuf2-dma-contig.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "dev.h"
#include "regs.h"

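/*
 * Register access helpers.
 *
 * Each ISPP instance keeps a software shadow of the register file at
 * sw_base_addr, with a parallel flag area (offset by RKISP_ISPP_SW_REG_SIZE)
 * marking which words hold cached-but-unflushed values (SW_REG_CACHE).
 * When a single instance owns the hardware (hw_dev->is_single), writes also
 * go straight to MMIO; otherwise rkispp_update_regs() replays the cached
 * words when this instance is scheduled onto the shared hardware.
 *
 * Illustrative use (ISPP_CTRL and SW_TNR_EN are placeholder names, not
 * registers defined in this file):
 *
 *	rkispp_set_bits(ispp, ISPP_CTRL, SW_TNR_EN, SW_TNR_EN);
 *	rkispp_clear_bits(ispp, ISPP_CTRL, SW_TNR_EN);
 */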
void rkispp_write(struct rkispp_device *dev, u32 reg, u32 val)
{
	u32 *mem = dev->sw_base_addr + reg;
	u32 *flag = dev->sw_base_addr + reg + RKISP_ISPP_SW_REG_SIZE;

	*mem = val;
	*flag = SW_REG_CACHE;
	if (dev->hw_dev->is_single)
		writel(val, dev->hw_dev->base_addr + reg);
}

u32 rkispp_read(struct rkispp_device *dev, u32 reg)
{
	u32 val;

	if (dev->hw_dev->is_single)
		val = readl(dev->hw_dev->base_addr + reg);
	else
		val = *(u32 *)(dev->sw_base_addr + reg);
	return val;
}

void rkispp_set_bits(struct rkispp_device *dev, u32 reg, u32 mask, u32 val)
{
	u32 tmp = rkispp_read(dev, reg) & ~mask;

	rkispp_write(dev, reg, val | tmp);
}

void rkispp_clear_bits(struct rkispp_device *dev, u32 reg, u32 mask)
{
	u32 tmp = rkispp_read(dev, reg);

	rkispp_write(dev, reg, tmp & ~mask);
}

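/*
 * Flush the cached shadow registers in [start, end] to the hardware.
 * Only words flagged SW_REG_CACHE are written, so registers the driver
 * never touched keep their hardware defaults.
 */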
void rkispp_update_regs(struct rkispp_device *dev, u32 start, u32 end)
{
	void __iomem *base = dev->hw_dev->base_addr;
	u32 i;

	if (end > RKISP_ISPP_SW_REG_SIZE - 4) {
		dev_err(dev->dev, "%s out of range\n", __func__);
		return;
	}
	for (i = start; i <= end; i += 4) {
		u32 *val = dev->sw_base_addr + i;
		u32 *flag = dev->sw_base_addr + i + RKISP_ISPP_SW_REG_SIZE;

		if (*flag == SW_REG_CACHE)
			writel(*val, base + i);
	}
}

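/*
 * Allocate a driver-internal (dummy) buffer through the videobuf2 memory
 * ops of the hardware node ("allow" appears to be a historical typo for
 * "alloc"; the name is kept since it is part of the driver's internal API).
 * Optional kernel mapping, dma-buf export and fd installation are
 * controlled by the is_need_* flags in the buffer.
 */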
int rkispp_allow_buffer(struct rkispp_device *dev,
			struct rkispp_dummy_buffer *buf)
{
	unsigned long attrs = buf->is_need_vaddr ? 0 : DMA_ATTR_NO_KERNEL_MAPPING;
	const struct vb2_mem_ops *g_ops = dev->hw_dev->mem_ops;
	struct sg_table *sg_tbl;
	void *mem_priv;
	int ret = 0;

	if (!buf->size) {
		ret = -EINVAL;
		goto err;
	}

	if (dev->hw_dev->is_dma_contig)
		attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
	buf->size = PAGE_ALIGN(buf->size);
	mem_priv = g_ops->alloc(dev->hw_dev->dev, attrs, buf->size,
				DMA_BIDIRECTIONAL, GFP_KERNEL | GFP_DMA32);
	if (IS_ERR_OR_NULL(mem_priv)) {
		ret = -ENOMEM;
		goto err;
	}

	buf->mem_priv = mem_priv;
	if (dev->hw_dev->is_dma_sg_ops) {
		sg_tbl = (struct sg_table *)g_ops->cookie(mem_priv);
		buf->dma_addr = sg_dma_address(sg_tbl->sgl);
		g_ops->prepare(mem_priv);
	} else {
		buf->dma_addr = *((dma_addr_t *)g_ops->cookie(mem_priv));
	}
	if (buf->is_need_vaddr)
		buf->vaddr = g_ops->vaddr(mem_priv);
	if (buf->is_need_dbuf) {
		buf->dbuf = g_ops->get_dmabuf(mem_priv, O_RDWR);
		if (buf->is_need_dmafd) {
			buf->dma_fd = dma_buf_fd(buf->dbuf, O_CLOEXEC);
			if (buf->dma_fd < 0) {
				dma_buf_put(buf->dbuf);
				ret = buf->dma_fd;
				goto err;
			}
			get_dma_buf(buf->dbuf);
		}
	}
	v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
		 "%s buf:0x%x~0x%x size:%d\n", __func__,
		 (u32)buf->dma_addr, (u32)buf->dma_addr + buf->size, buf->size);
	return ret;
err:
	dev_err(dev->dev, "%s failed ret:%d\n", __func__, ret);
	return ret;
}

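/*
 * Release a buffer obtained from rkispp_allow_buffer(): drop the exported
 * dma-buf reference if any, free the vb2 allocation and reset the state
 * flags so the structure can be reused.
 */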
void rkispp_free_buffer(struct rkispp_device *dev,
			struct rkispp_dummy_buffer *buf)
{
	const struct vb2_mem_ops *g_ops = dev->hw_dev->mem_ops;

	if (buf && buf->mem_priv) {
		v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
			 "%s buf:0x%x~0x%x\n", __func__,
			 (u32)buf->dma_addr, (u32)buf->dma_addr + buf->size);
		if (buf->dbuf)
			dma_buf_put(buf->dbuf);
		g_ops->put(buf->mem_priv);
		buf->size = 0;
		buf->dbuf = NULL;
		buf->vaddr = NULL;
		buf->mem_priv = NULL;
		buf->is_need_dbuf = false;
		buf->is_need_vaddr = false;
		buf->is_need_dmafd = false;
	}
}

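/*
 * Cache maintenance wrappers around the vb2 prepare/finish ops: prepare
 * hands the buffer to the device (flushes CPU caches), finish hands it
 * back to the CPU (invalidates).
 */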
void rkispp_prepare_buffer(struct rkispp_device *dev,
			   struct rkispp_dummy_buffer *buf)
{
	const struct vb2_mem_ops *g_ops = dev->hw_dev->mem_ops;

	if (buf && buf->mem_priv)
		g_ops->prepare(buf->mem_priv);
}

void rkispp_finish_buffer(struct rkispp_device *dev,
			  struct rkispp_dummy_buffer *buf)
{
	const struct vb2_mem_ops *g_ops = dev->hw_dev->mem_ops;

	if (buf && buf->mem_priv)
		g_ops->finish(buf->mem_priv);
}

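/*
 * Bind this logical ISPP device to the shared hardware node referenced by
 * the "rockchip,hw" phandle. Several ISPP instances may attach to one
 * hardware block; the first attach leaves is_single set, any further
 * attach clears it so register writes go through the shadow/replay path.
 */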
int rkispp_attach_hw(struct rkispp_device *ispp)
{
	struct device_node *np;
	struct platform_device *pdev;
	struct rkispp_hw_dev *hw;

	np = of_parse_phandle(ispp->dev->of_node, "rockchip,hw", 0);
	if (!np || !of_device_is_available(np)) {
		dev_err(ispp->dev, "failed to get ispp hw node\n");
		of_node_put(np);
		return -ENODEV;
	}

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev) {
		dev_err(ispp->dev, "failed to get ispp hw from node\n");
		return -ENODEV;
	}

	hw = platform_get_drvdata(pdev);
	if (!hw) {
		dev_err(ispp->dev, "failed to attach ispp hw\n");
		return -EINVAL;
	}

	if (hw->dev_num)
		hw->is_single = false;
	ispp->dev_id = hw->dev_num;
	hw->ispp[hw->dev_num] = ispp;
	hw->dev_num++;
	ispp->hw_dev = hw;
	ispp->ispp_ver = hw->ispp_ver;

	return 0;
}

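/*
 * reg_buf backs the rkispp_reg_withstream feature: a vmalloc'd array of
 * rkisp_ispp_reg slots that register snapshots are written into and later
 * matched to frames by dev_id and frame_id.
 */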
static int rkispp_init_regbuf(struct rkispp_hw_dev *hw)
{
	struct rkisp_ispp_reg *reg_buf;
	u32 i, buf_size;

	if (!rkispp_is_reg_withstream_global()) {
		hw->reg_buf = NULL;
		return 0;
	}

	buf_size = RKISP_ISPP_REGBUF_NUM * sizeof(struct rkisp_ispp_reg);
	hw->reg_buf = vmalloc(buf_size);
	if (!hw->reg_buf)
		return -ENOMEM;

	reg_buf = hw->reg_buf;
	for (i = 0; i < RKISP_ISPP_REGBUF_NUM; i++) {
		reg_buf[i].stat = ISP_ISPP_FREE;
		reg_buf[i].dev_id = 0xFF;
		reg_buf[i].frame_id = 0;
		reg_buf[i].reg_size = 0;
		reg_buf[i].sof_timestamp = 0LL;
		reg_buf[i].frame_timestamp = 0LL;
	}

	return 0;
}

static void rkispp_free_regbuf(struct rkispp_hw_dev *hw)
{
	if (hw->reg_buf) {
		vfree(hw->reg_buf);
		hw->reg_buf = NULL;
	}
}

static int rkispp_find_regbuf_by_stat(struct rkispp_hw_dev *hw,
				      struct rkisp_ispp_reg **free_buf,
				      enum rkisp_ispp_reg_stat stat)
{
	struct rkisp_ispp_reg *reg_buf = hw->reg_buf;
	int i, ret;

	*free_buf = NULL;
	if (!hw->reg_buf || !rkispp_reg_withstream)
		return -EINVAL;

	for (i = 0; i < RKISP_ISPP_REGBUF_NUM; i++) {
		if (reg_buf[i].stat == stat)
			break;
	}

	ret = -ENODATA;
	if (i < RKISP_ISPP_REGBUF_NUM) {
		ret = 0;
		*free_buf = &reg_buf[i];
	}

	return ret;
}

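/*
 * hw->pool caches ISP output buffers imported into the ISPP as dma-bufs:
 * rkispp_init_pool() attaches and maps each dma-buf of a rkisp_ispp_buf
 * group and records its DMA address and vaddr; rkispp_free_pool() undoes
 * the mapping once no user holds a stream reference (hw->refcnt).
 */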
static void rkispp_free_pool(struct rkispp_hw_dev *hw)
{
	const struct vb2_mem_ops *g_ops = hw->mem_ops;
	struct rkispp_isp_buf_pool *buf;
	int i, j;

	if (atomic_read(&hw->refcnt))
		return;

	for (i = 0; i < RKISPP_BUF_POOL_MAX; i++) {
		buf = &hw->pool[i];
		if (!buf->dbufs)
			break;
		if (rkispp_debug)
			dev_info(hw->dev, "%s dbufs[%d]:0x%p\n",
				 __func__, i, buf->dbufs);
		for (j = 0; j < hw->pool[0].group_buf_max; j++) {
			if (buf->mem_priv[j]) {
				g_ops->unmap_dmabuf(buf->mem_priv[j]);
				g_ops->detach_dmabuf(buf->mem_priv[j]);
				dma_buf_put(buf->dbufs->dbuf[j]);
				buf->mem_priv[j] = NULL;
			}
		}
		buf->dbufs = NULL;
	}

	rkispp_free_regbuf(hw);
	hw->is_idle = true;
}

static int rkispp_init_pool(struct rkispp_hw_dev *hw, struct rkisp_ispp_buf *dbufs)
{
	const struct vb2_mem_ops *g_ops = hw->mem_ops;
	struct rkispp_isp_buf_pool *pool;
	struct sg_table *sg_tbl;
	int i, ret = 0;
	void *mem;

	INIT_LIST_HEAD(&hw->list);
	/* pick the first free slot in the dma buf pool */
	for (i = 0; i < RKISPP_BUF_POOL_MAX; i++) {
		pool = &hw->pool[i];
		if (!pool->dbufs)
			break;
	}
	if (i == RKISPP_BUF_POOL_MAX) {
		dev_err(hw->dev, "%s no free pool slot\n", __func__);
		return -ENOMEM;
	}
	dbufs->is_isp = true;
	pool->dbufs = dbufs;
	if (rkispp_debug)
		dev_info(hw->dev, "%s dbufs[%d]:0x%p\n",
			 __func__, i, dbufs);
	for (i = 0; i < hw->pool[0].group_buf_max; i++) {
		mem = g_ops->attach_dmabuf(hw->dev, dbufs->dbuf[i],
					   dbufs->dbuf[i]->size,
					   DMA_BIDIRECTIONAL);
		if (IS_ERR(mem)) {
			ret = PTR_ERR(mem);
			goto err;
		}
		pool->mem_priv[i] = mem;
		ret = g_ops->map_dmabuf(mem);
		if (ret)
			goto err;
		if (hw->is_dma_sg_ops) {
			sg_tbl = (struct sg_table *)g_ops->cookie(mem);
			pool->dma[i] = sg_dma_address(sg_tbl->sgl);
		} else {
			pool->dma[i] = *((dma_addr_t *)g_ops->cookie(mem));
		}
		get_dma_buf(dbufs->dbuf[i]);
		pool->vaddr[i] = g_ops->vaddr(mem);
		if (rkispp_debug)
			dev_info(hw->dev, "%s dma[%d]:0x%x\n",
				 __func__, i, (u32)pool->dma[i]);
	}
	rkispp_init_regbuf(hw);
	hw->is_idle = true;
	return ret;
err:
	rkispp_free_pool(hw);
	return ret;
}

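/*
 * Schedule an ISP buffer onto the shared ISPP hardware. If the hardware
 * is idle, the buffer (or the oldest queued one) is dispatched directly to
 * the first enabled module (TNR, then NR, then FEC); otherwise it is
 * appended to hw->list and handled when the current job completes.
 * Called with dbufs == NULL to mark the hardware idle again.
 */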
static void rkispp_queue_dmabuf(struct rkispp_hw_dev *hw, struct rkisp_ispp_buf *dbufs)
{
	struct list_head *list = &hw->list;
	struct rkispp_device *ispp;
	struct rkispp_stream_vdev *vdev;
	struct rkisp_ispp_buf *buf = NULL;
	unsigned long lock_flags = 0;
	u32 val;

	spin_lock_irqsave(&hw->buf_lock, lock_flags);
	if (!dbufs)
		hw->is_idle = true;
	if (hw->is_shutdown)
		hw->is_idle = false;
	if (dbufs && list_empty(list) && hw->is_idle) {
		/* ispp idle or handling the same device */
		buf = dbufs;
	} else if (hw->is_idle && !list_empty(list)) {
		/* ispp idle: handle the first buf in the list */
		buf = list_first_entry(list,
				       struct rkisp_ispp_buf, list);
		list_del(&buf->list);
		if (dbufs)
			list_add_tail(&dbufs->list, list);
	} else if (dbufs) {
		/* new buf queued, waiting to be handled */
		list_add_tail(&dbufs->list, list);
	}

	if (buf) {
		hw->is_idle = false;
		hw->cur_dev_id = buf->index;
		ispp = hw->ispp[buf->index];
		vdev = &ispp->stream_vdev;
		val = (vdev->module_ens & ISPP_MODULE_TNR) ? ISPP_MODULE_TNR :
		      ((vdev->module_ens & ISPP_MODULE_NR) ?
		       ISPP_MODULE_NR : ISPP_MODULE_FEC);
		vdev->stream_ops->rkispp_module_work_event(ispp, buf, NULL,
							   val, false);
	}

	spin_unlock_irqrestore(&hw->buf_lock, lock_flags);
}

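/*
 * Dispatch entry point used by the stream code: CMD_STREAM updates the
 * hardware user count, CMD_INIT_POOL/CMD_FREE_POOL manage the imported
 * ISP buffer pool, and CMD_QUEUE_DMABUF feeds the scheduler above.
 */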
int rkispp_event_handle(struct rkispp_device *ispp, u32 cmd, void *arg)
{
	struct rkispp_hw_dev *hw = ispp->hw_dev;
	int ret = 0;

	switch (cmd) {
	case CMD_STREAM:
		if (*(int *)arg)
			atomic_inc(&hw->refcnt);
		else
			atomic_dec(&hw->refcnt);
		break;
	case CMD_INIT_POOL:
		ret = rkispp_init_pool(hw, arg);
		break;
	case CMD_FREE_POOL:
		rkispp_free_pool(hw);
		break;
	case CMD_QUEUE_DMABUF:
		rkispp_queue_dmabuf(hw, arg);
		break;
	default:
		ret = -EFAULT;
	}

	return ret;
}

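/*
 * With an IOMMU present, back the dummy buffer with a single physical
 * page mapped repeatedly: every entry of the pages[] array points at the
 * same page, so a large IOVA range costs only one page of real memory.
 */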
static int rkispp_alloc_page_dummy_buf(struct rkispp_device *dev, u32 size)
{
	struct rkispp_hw_dev *hw = dev->hw_dev;
	struct rkispp_dummy_buffer *dummy_buf = &hw->dummy_buf;
	u32 i, n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;
	struct sg_table *sg = NULL;
	int ret = -ENOMEM;

	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 0);
	if (!page)
		goto err;

	pages = kvmalloc_array(n_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto free_page;
	/* reuse the same physical page across the whole range */
	for (i = 0; i < n_pages; i++)
		pages[i] = page;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg)
		goto free_pages;
	ret = sg_alloc_table_from_pages(sg, pages, n_pages, 0,
					n_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto free_sg;

	ret = dma_map_sg(hw->dev, sg->sgl, sg->nents, DMA_BIDIRECTIONAL);
	if (!ret) {
		ret = -ENOMEM;
		goto free_sg_table;
	}
	dummy_buf->dma_addr = sg_dma_address(sg->sgl);
	dummy_buf->mem_priv = sg;
	dummy_buf->pages = pages;
	v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
		 "%s buf:0x%x map cnt:%d\n", __func__,
		 (u32)dummy_buf->dma_addr, ret);
	return 0;
free_sg_table:
	sg_free_table(sg);
free_sg:
	kfree(sg);
free_pages:
	kvfree(pages);
free_page:
	__free_pages(page, 0);
err:
	return ret;
}

static void rkispp_free_page_dummy_buf(struct rkispp_device *dev)
{
	struct rkispp_dummy_buffer *dummy_buf = &dev->hw_dev->dummy_buf;
	struct sg_table *sg = dummy_buf->mem_priv;

	if (!sg)
		return;
	dma_unmap_sg(dev->hw_dev->dev, sg->sgl, sg->nents, DMA_BIDIRECTIONAL);
	sg_free_table(sg);
	kfree(sg);
	__free_pages(dummy_buf->pages[0], 0);
	kvfree(dummy_buf->pages);
	dummy_buf->mem_priv = NULL;
	dummy_buf->pages = NULL;
}

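/*
 * The common dummy buffer soaks up writes for output ports that are
 * enabled in hardware but have no buffer queued. It is sized for the
 * largest configured input, at what is presumably a worst case of
 * 2 bytes per pixel.
 */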
int rkispp_alloc_common_dummy_buf(struct rkispp_device *dev)
{
	struct rkispp_hw_dev *hw = dev->hw_dev;
	struct rkispp_subdev *sdev = &dev->ispp_sdev;
	struct rkispp_dummy_buffer *dummy_buf = &hw->dummy_buf;
	u32 w = hw->max_in.w ? hw->max_in.w : sdev->out_fmt.width;
	u32 h = hw->max_in.h ? hw->max_in.h : sdev->out_fmt.height;
	u32 size = w * h * 2;
	int ret = 0;

	mutex_lock(&hw->dev_lock);
	if (dummy_buf->mem_priv)
		goto end;

	if (hw->is_mmu) {
		ret = rkispp_alloc_page_dummy_buf(dev, size);
		goto end;
	}

	dummy_buf->size = size;
	ret = rkispp_allow_buffer(dev, dummy_buf);
	if (!ret)
		v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
			 "%s buf:0x%x size:%d\n", __func__,
			 (u32)dummy_buf->dma_addr, dummy_buf->size);
end:
	if (ret < 0)
		v4l2_err(&dev->v4l2_dev, "%s failed:%d\n", __func__, ret);
	mutex_unlock(&hw->dev_lock);
	return ret;
}

void rkispp_free_common_dummy_buf(struct rkispp_device *dev)
{
	struct rkispp_hw_dev *hw = dev->hw_dev;

	mutex_lock(&hw->dev_lock);
	if (atomic_read(&hw->refcnt) ||
	    atomic_read(&dev->stream_vdev.refcnt) > 1)
		goto end;
	if (hw->is_mmu)
		rkispp_free_page_dummy_buf(dev);
	else
		rkispp_free_buffer(dev, &hw->dummy_buf);
end:
	mutex_unlock(&hw->dev_lock);
}

int rkispp_find_regbuf_by_id(struct rkispp_device *ispp, struct rkisp_ispp_reg **free_buf,
			     u32 dev_id, u32 frame_id)
{
	struct rkispp_hw_dev *hw = ispp->hw_dev;
	struct rkisp_ispp_reg *reg_buf = hw->reg_buf;
	int i, ret;

	*free_buf = NULL;
	if (!hw->reg_buf)
		return -EINVAL;

	for (i = 0; i < RKISP_ISPP_REGBUF_NUM; i++) {
		if (reg_buf[i].dev_id == dev_id && reg_buf[i].frame_id == frame_id)
			break;
	}

	ret = -ENODATA;
	if (i < RKISP_ISPP_REGBUF_NUM) {
		ret = 0;
		*free_buf = &reg_buf[i];
	}

	return ret;
}

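/*
 * Recycle register snapshots for this device that are older (by frame
 * timestamp) than the buffer being released, making them available to
 * rkispp_request_regbuf() again.
 */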
void rkispp_release_regbuf(struct rkispp_device *ispp, struct rkisp_ispp_reg *freebuf)
{
	struct rkispp_hw_dev *hw = ispp->hw_dev;
	struct rkisp_ispp_reg *reg_buf = hw->reg_buf;
	int i;

	if (!hw->reg_buf)
		return;

	for (i = 0; i < RKISP_ISPP_REGBUF_NUM; i++) {
		if (reg_buf[i].dev_id == freebuf->dev_id &&
		    reg_buf[i].frame_timestamp < freebuf->frame_timestamp) {
			reg_buf[i].frame_id = 0;
			reg_buf[i].stat = ISP_ISPP_FREE;
		}
	}
}

void rkispp_request_regbuf(struct rkispp_device *dev, struct rkisp_ispp_reg **free_buf)
{
	struct rkispp_hw_dev *hw = dev->hw_dev;
	int ret;

	if (!hw->reg_buf) {
		*free_buf = NULL;
		return;
	}

	ret = rkispp_find_regbuf_by_stat(hw, free_buf, ISP_ISPP_FREE);
	if (!ret)
		(*free_buf)->stat = ISP_ISPP_INUSE;
}

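/*
 * rkispp_reg_withstream and rkispp_reg_withstream_video_name are
 * module-level controls (presumably module parameters) defined elsewhere
 * in the driver; the helpers below let other files query whether register
 * dumping is enabled globally or for one video node, matched by device
 * name prefix.
 */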
bool rkispp_is_reg_withstream_global(void)
{
	return rkispp_reg_withstream;
}

bool rkispp_is_reg_withstream_local(struct device *dev)
{
	const char *node_name = dev_name(dev);

	if (!node_name)
		return false;

	return !memcmp(rkispp_reg_withstream_video_name, node_name,
		       strlen(node_name));
}