1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /* Copyright (C) 2019 Rockchip Electronics Co., Ltd */
3*4882a593Smuzhiyun
4*4882a593Smuzhiyun #include <media/videobuf2-dma-contig.h>
5*4882a593Smuzhiyun #include <linux/delay.h>
6*4882a593Smuzhiyun #include <linux/of_platform.h>
7*4882a593Smuzhiyun #include <linux/slab.h>
8*4882a593Smuzhiyun #include "dev.h"
9*4882a593Smuzhiyun #include "regs.h"
10*4882a593Smuzhiyun
/*
 * Write @val into the software register cache at offset @reg and tag the
 * slot as dirty (SW_REG_CACHE); when this ISPP instance is the only user
 * of the hardware, mirror the write straight to the register block.
 */
void rkispp_write(struct rkispp_device *dev, u32 reg, u32 val)
{
	void *sw = dev->sw_base_addr + reg;

	*(u32 *)sw = val;
	/* flag area lives RKISP_ISPP_SW_REG_SIZE above the value area */
	*(u32 *)(sw + RKISP_ISPP_SW_REG_SIZE) = SW_REG_CACHE;
	if (dev->hw_dev->is_single)
		writel(val, dev->hw_dev->base_addr + reg);
}
21*4882a593Smuzhiyun
/*
 * Read register @reg: directly from hardware when this instance owns it
 * exclusively, otherwise from the software register cache.
 */
u32 rkispp_read(struct rkispp_device *dev, u32 reg)
{
	if (dev->hw_dev->is_single)
		return readl(dev->hw_dev->base_addr + reg);

	return *(u32 *)(dev->sw_base_addr + reg);
}
32*4882a593Smuzhiyun
/* Read-modify-write: clear @mask in @reg, then OR in @val. */
void rkispp_set_bits(struct rkispp_device *dev, u32 reg, u32 mask, u32 val)
{
	u32 cur = rkispp_read(dev, reg);

	rkispp_write(dev, reg, (cur & ~mask) | val);
}
39*4882a593Smuzhiyun
/* Clear the bits in @mask of register @reg. */
void rkispp_clear_bits(struct rkispp_device *dev, u32 reg, u32 mask)
{
	rkispp_write(dev, reg, rkispp_read(dev, reg) & ~mask);
}
46*4882a593Smuzhiyun
/*
 * Flush the dirty (SW_REG_CACHE-tagged) entries of the software register
 * cache in the inclusive range [@start, @end] to the hardware.
 */
void rkispp_update_regs(struct rkispp_device *dev, u32 start, u32 end)
{
	void __iomem *base = dev->hw_dev->base_addr;
	u32 offset = start;

	if (end > RKISP_ISPP_SW_REG_SIZE - 4) {
		dev_err(dev->dev, "%s out of range\n", __func__);
		return;
	}

	while (offset <= end) {
		u32 *val = dev->sw_base_addr + offset;
		u32 *flag = dev->sw_base_addr + offset + RKISP_ISPP_SW_REG_SIZE;

		/* only push registers the driver actually wrote */
		if (*flag == SW_REG_CACHE)
			writel(*val, base + offset);
		offset += 4;
	}
}
64*4882a593Smuzhiyun
/*
 * rkispp_allow_buffer - allocate a dummy DMA buffer through the hw vb2 mem_ops
 * ("allow" is presumably a historical typo for "alloc"; kept for ABI).
 *
 * Honors the buffer's request flags: is_need_vaddr (kernel mapping),
 * is_need_dbuf (export as dma-buf), is_need_dmafd (install an fd for it).
 * Returns 0 on success, negative errno on failure.
 *
 * NOTE(review): on the dma_buf_fd() failure path the previously allocated
 * mem_priv (and the exported dbuf reference state) are not released here —
 * presumably the caller is expected to call rkispp_free_buffer(); confirm.
 */
int rkispp_allow_buffer(struct rkispp_device *dev,
			struct rkispp_dummy_buffer *buf)
{
	/* skip the kernel mapping entirely unless a vaddr was requested */
	unsigned long attrs = buf->is_need_vaddr ? 0 : DMA_ATTR_NO_KERNEL_MAPPING;
	const struct vb2_mem_ops *g_ops = dev->hw_dev->mem_ops;
	struct sg_table *sg_tbl;
	void *mem_priv;
	int ret = 0;

	if (!buf->size) {
		ret = -EINVAL;
		goto err;
	}

	if (dev->hw_dev->is_dma_contig)
		attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
	buf->size = PAGE_ALIGN(buf->size);
	mem_priv = g_ops->alloc(dev->hw_dev->dev, attrs, buf->size,
				DMA_BIDIRECTIONAL, GFP_KERNEL | GFP_DMA32);
	if (IS_ERR_OR_NULL(mem_priv)) {
		ret = -ENOMEM;
		goto err;
	}

	buf->mem_priv = mem_priv;
	if (dev->hw_dev->is_dma_sg_ops) {
		/* sg-based allocator: dma address comes from the sg table */
		sg_tbl = (struct sg_table *)g_ops->cookie(mem_priv);
		buf->dma_addr = sg_dma_address(sg_tbl->sgl);
		g_ops->prepare(mem_priv);
	} else {
		buf->dma_addr = *((dma_addr_t *)g_ops->cookie(mem_priv));
	}
	if (buf->is_need_vaddr)
		buf->vaddr = g_ops->vaddr(mem_priv);
	if (buf->is_need_dbuf) {
		buf->dbuf = g_ops->get_dmabuf(mem_priv, O_RDWR);
		if (buf->is_need_dmafd) {
			buf->dma_fd = dma_buf_fd(buf->dbuf, O_CLOEXEC);
			if (buf->dma_fd < 0) {
				dma_buf_put(buf->dbuf);
				ret = buf->dma_fd;
				goto err;
			}
			/* fd owns one reference; take another for buf->dbuf */
			get_dma_buf(buf->dbuf);
		}
	}
	v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
		 "%s buf:0x%x~0x%x size:%d\n", __func__,
		 (u32)buf->dma_addr, (u32)buf->dma_addr + buf->size, buf->size);
	return ret;
err:
	dev_err(dev->dev, "%s failed ret:%d\n", __func__, ret);
	return ret;
}
119*4882a593Smuzhiyun
rkispp_free_buffer(struct rkispp_device * dev,struct rkispp_dummy_buffer * buf)120*4882a593Smuzhiyun void rkispp_free_buffer(struct rkispp_device *dev,
121*4882a593Smuzhiyun struct rkispp_dummy_buffer *buf)
122*4882a593Smuzhiyun {
123*4882a593Smuzhiyun const struct vb2_mem_ops *g_ops = dev->hw_dev->mem_ops;
124*4882a593Smuzhiyun
125*4882a593Smuzhiyun if (buf && buf->mem_priv) {
126*4882a593Smuzhiyun v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
127*4882a593Smuzhiyun "%s buf:0x%x~0x%x\n", __func__,
128*4882a593Smuzhiyun (u32)buf->dma_addr, (u32)buf->dma_addr + buf->size);
129*4882a593Smuzhiyun if (buf->dbuf)
130*4882a593Smuzhiyun dma_buf_put(buf->dbuf);
131*4882a593Smuzhiyun g_ops->put(buf->mem_priv);
132*4882a593Smuzhiyun buf->size = 0;
133*4882a593Smuzhiyun buf->dbuf = NULL;
134*4882a593Smuzhiyun buf->vaddr = NULL;
135*4882a593Smuzhiyun buf->mem_priv = NULL;
136*4882a593Smuzhiyun buf->is_need_dbuf = false;
137*4882a593Smuzhiyun buf->is_need_vaddr = false;
138*4882a593Smuzhiyun buf->is_need_dmafd = false;
139*4882a593Smuzhiyun }
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun
rkispp_prepare_buffer(struct rkispp_device * dev,struct rkispp_dummy_buffer * buf)142*4882a593Smuzhiyun void rkispp_prepare_buffer(struct rkispp_device *dev,
143*4882a593Smuzhiyun struct rkispp_dummy_buffer *buf)
144*4882a593Smuzhiyun {
145*4882a593Smuzhiyun const struct vb2_mem_ops *g_ops = dev->hw_dev->mem_ops;
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun if (buf && buf->mem_priv)
148*4882a593Smuzhiyun g_ops->prepare(buf->mem_priv);
149*4882a593Smuzhiyun }
150*4882a593Smuzhiyun
rkispp_finish_buffer(struct rkispp_device * dev,struct rkispp_dummy_buffer * buf)151*4882a593Smuzhiyun void rkispp_finish_buffer(struct rkispp_device *dev,
152*4882a593Smuzhiyun struct rkispp_dummy_buffer *buf)
153*4882a593Smuzhiyun {
154*4882a593Smuzhiyun const struct vb2_mem_ops *g_ops = dev->hw_dev->mem_ops;
155*4882a593Smuzhiyun
156*4882a593Smuzhiyun if (buf && buf->mem_priv)
157*4882a593Smuzhiyun g_ops->finish(buf->mem_priv);
158*4882a593Smuzhiyun }
159*4882a593Smuzhiyun
/*
 * rkispp_attach_hw - bind this ISPP logical device to its shared hardware
 * device, located through the "rockchip,hw" DT phandle.
 *
 * Registers @ispp in the hw device's ispp[] table, assigns it a dev_id,
 * and flips the hw into multi-device mode once a second instance attaches.
 * Returns 0 on success, -ENODEV/-EINVAL on lookup failure.
 *
 * NOTE(review): of_find_device_by_node() takes a reference on the platform
 * device that is never dropped here — presumably intentional because the hw
 * device must outlive every attached ISPP instance; confirm.
 */
int rkispp_attach_hw(struct rkispp_device *ispp)
{
	struct device_node *np;
	struct platform_device *pdev;
	struct rkispp_hw_dev *hw;

	np = of_parse_phandle(ispp->dev->of_node, "rockchip,hw", 0);
	if (!np || !of_device_is_available(np)) {
		dev_err(ispp->dev, "failed to get ispp hw node\n");
		return -ENODEV;
	}

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev) {
		dev_err(ispp->dev, "failed to get ispp hw from node\n");
		return -ENODEV;
	}

	hw = platform_get_drvdata(pdev);
	if (!hw) {
		dev_err(ispp->dev, "failed attach ispp hw\n");
		return -EINVAL;
	}

	/* a second attached device means the hw is no longer exclusive */
	if (hw->dev_num)
		hw->is_single = false;
	ispp->dev_id = hw->dev_num;
	hw->ispp[hw->dev_num] = ispp;
	hw->dev_num++;
	ispp->hw_dev = hw;
	ispp->ispp_ver = hw->ispp_ver;

	return 0;
}
195*4882a593Smuzhiyun
rkispp_init_regbuf(struct rkispp_hw_dev * hw)196*4882a593Smuzhiyun static int rkispp_init_regbuf(struct rkispp_hw_dev *hw)
197*4882a593Smuzhiyun {
198*4882a593Smuzhiyun struct rkisp_ispp_reg *reg_buf;
199*4882a593Smuzhiyun u32 i, buf_size;
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun if (!rkispp_is_reg_withstream_global()) {
202*4882a593Smuzhiyun hw->reg_buf = NULL;
203*4882a593Smuzhiyun return 0;
204*4882a593Smuzhiyun }
205*4882a593Smuzhiyun
206*4882a593Smuzhiyun buf_size = RKISP_ISPP_REGBUF_NUM * sizeof(struct rkisp_ispp_reg);
207*4882a593Smuzhiyun hw->reg_buf = vmalloc(buf_size);
208*4882a593Smuzhiyun if (!hw->reg_buf)
209*4882a593Smuzhiyun return -ENOMEM;
210*4882a593Smuzhiyun
211*4882a593Smuzhiyun reg_buf = hw->reg_buf;
212*4882a593Smuzhiyun for (i = 0; i < RKISP_ISPP_REGBUF_NUM; i++) {
213*4882a593Smuzhiyun reg_buf[i].stat = ISP_ISPP_FREE;
214*4882a593Smuzhiyun reg_buf[i].dev_id = 0xFF;
215*4882a593Smuzhiyun reg_buf[i].frame_id = 0;
216*4882a593Smuzhiyun reg_buf[i].reg_size = 0;
217*4882a593Smuzhiyun reg_buf[i].sof_timestamp = 0LL;
218*4882a593Smuzhiyun reg_buf[i].frame_timestamp = 0LL;
219*4882a593Smuzhiyun }
220*4882a593Smuzhiyun
221*4882a593Smuzhiyun return 0;
222*4882a593Smuzhiyun }
223*4882a593Smuzhiyun
rkispp_free_regbuf(struct rkispp_hw_dev * hw)224*4882a593Smuzhiyun static void rkispp_free_regbuf(struct rkispp_hw_dev *hw)
225*4882a593Smuzhiyun {
226*4882a593Smuzhiyun if (hw->reg_buf) {
227*4882a593Smuzhiyun vfree(hw->reg_buf);
228*4882a593Smuzhiyun hw->reg_buf = NULL;
229*4882a593Smuzhiyun }
230*4882a593Smuzhiyun }
231*4882a593Smuzhiyun
rkispp_find_regbuf_by_stat(struct rkispp_hw_dev * hw,struct rkisp_ispp_reg ** free_buf,enum rkisp_ispp_reg_stat stat)232*4882a593Smuzhiyun static int rkispp_find_regbuf_by_stat(struct rkispp_hw_dev *hw, struct rkisp_ispp_reg **free_buf,
233*4882a593Smuzhiyun enum rkisp_ispp_reg_stat stat)
234*4882a593Smuzhiyun {
235*4882a593Smuzhiyun struct rkisp_ispp_reg *reg_buf = hw->reg_buf;
236*4882a593Smuzhiyun int i = 0, ret;
237*4882a593Smuzhiyun
238*4882a593Smuzhiyun *free_buf = NULL;
239*4882a593Smuzhiyun if (!hw->reg_buf || !rkispp_reg_withstream)
240*4882a593Smuzhiyun return -EINVAL;
241*4882a593Smuzhiyun
242*4882a593Smuzhiyun for (i = 0; i < RKISP_ISPP_REGBUF_NUM; i++) {
243*4882a593Smuzhiyun if (reg_buf[i].stat == stat)
244*4882a593Smuzhiyun break;
245*4882a593Smuzhiyun }
246*4882a593Smuzhiyun
247*4882a593Smuzhiyun ret = -ENODATA;
248*4882a593Smuzhiyun if (i < RKISP_ISPP_REGBUF_NUM) {
249*4882a593Smuzhiyun ret = 0;
250*4882a593Smuzhiyun *free_buf = ®_buf[i];
251*4882a593Smuzhiyun }
252*4882a593Smuzhiyun
253*4882a593Smuzhiyun return ret;
254*4882a593Smuzhiyun }
255*4882a593Smuzhiyun
/*
 * rkispp_free_pool - tear down every ISP buffer-pool entry: unmap and
 * detach the dma-bufs, drop the references taken in rkispp_init_pool(),
 * free the register ring and mark the hardware idle.
 *
 * Skipped entirely while any device still holds a streaming reference.
 * Called with no lock here; presumably serialized by the caller — confirm.
 */
static void rkispp_free_pool(struct rkispp_hw_dev *hw)
{
	const struct vb2_mem_ops *g_ops = hw->mem_ops;
	struct rkispp_isp_buf_pool *buf;
	int i, j;

	if (atomic_read(&hw->refcnt))
		return;

	for (i = 0; i < RKISPP_BUF_POOL_MAX; i++) {
		buf = &hw->pool[i];
		/* pool entries are filled front-to-back; first hole ends it */
		if (!buf->dbufs)
			break;
		if (rkispp_debug)
			dev_info(hw->dev, "%s dbufs[%d]:0x%p\n",
				 __func__, i, buf->dbufs);
		for (j = 0; j < hw->pool[0].group_buf_max; j++) {
			if (buf->mem_priv[j]) {
				/* reverse of init: unmap, detach, drop ref */
				g_ops->unmap_dmabuf(buf->mem_priv[j]);
				g_ops->detach_dmabuf(buf->mem_priv[j]);
				dma_buf_put(buf->dbufs->dbuf[j]);
				buf->mem_priv[j] = NULL;
			}
		}
		buf->dbufs = NULL;
	}

	rkispp_free_regbuf(hw);
	hw->is_idle = true;
}
286*4882a593Smuzhiyun
rkispp_init_pool(struct rkispp_hw_dev * hw,struct rkisp_ispp_buf * dbufs)287*4882a593Smuzhiyun static int rkispp_init_pool(struct rkispp_hw_dev *hw, struct rkisp_ispp_buf *dbufs)
288*4882a593Smuzhiyun {
289*4882a593Smuzhiyun const struct vb2_mem_ops *g_ops = hw->mem_ops;
290*4882a593Smuzhiyun struct rkispp_isp_buf_pool *pool;
291*4882a593Smuzhiyun struct sg_table *sg_tbl;
292*4882a593Smuzhiyun int i, ret = 0;
293*4882a593Smuzhiyun void *mem;
294*4882a593Smuzhiyun
295*4882a593Smuzhiyun INIT_LIST_HEAD(&hw->list);
296*4882a593Smuzhiyun /* init dma buf pool */
297*4882a593Smuzhiyun for (i = 0; i < RKISPP_BUF_POOL_MAX; i++) {
298*4882a593Smuzhiyun pool = &hw->pool[i];
299*4882a593Smuzhiyun if (!pool->dbufs)
300*4882a593Smuzhiyun break;
301*4882a593Smuzhiyun }
302*4882a593Smuzhiyun dbufs->is_isp = true;
303*4882a593Smuzhiyun pool->dbufs = dbufs;
304*4882a593Smuzhiyun if (rkispp_debug)
305*4882a593Smuzhiyun dev_info(hw->dev, "%s dbufs[%d]:0x%p\n",
306*4882a593Smuzhiyun __func__, i, dbufs);
307*4882a593Smuzhiyun for (i = 0; i < hw->pool[0].group_buf_max; i++) {
308*4882a593Smuzhiyun mem = g_ops->attach_dmabuf(hw->dev, dbufs->dbuf[i],
309*4882a593Smuzhiyun dbufs->dbuf[i]->size, DMA_BIDIRECTIONAL);
310*4882a593Smuzhiyun if (IS_ERR(mem)) {
311*4882a593Smuzhiyun ret = PTR_ERR(mem);
312*4882a593Smuzhiyun goto err;
313*4882a593Smuzhiyun }
314*4882a593Smuzhiyun pool->mem_priv[i] = mem;
315*4882a593Smuzhiyun ret = g_ops->map_dmabuf(mem);
316*4882a593Smuzhiyun if (ret)
317*4882a593Smuzhiyun goto err;
318*4882a593Smuzhiyun if (hw->is_dma_sg_ops) {
319*4882a593Smuzhiyun sg_tbl = (struct sg_table *)g_ops->cookie(mem);
320*4882a593Smuzhiyun pool->dma[i] = sg_dma_address(sg_tbl->sgl);
321*4882a593Smuzhiyun } else {
322*4882a593Smuzhiyun pool->dma[i] = *((dma_addr_t *)g_ops->cookie(mem));
323*4882a593Smuzhiyun }
324*4882a593Smuzhiyun get_dma_buf(dbufs->dbuf[i]);
325*4882a593Smuzhiyun pool->vaddr[i] = g_ops->vaddr(mem);
326*4882a593Smuzhiyun if (rkispp_debug)
327*4882a593Smuzhiyun dev_info(hw->dev, "%s dma[%d]:0x%x\n",
328*4882a593Smuzhiyun __func__, i, (u32)pool->dma[i]);
329*4882a593Smuzhiyun
330*4882a593Smuzhiyun }
331*4882a593Smuzhiyun rkispp_init_regbuf(hw);
332*4882a593Smuzhiyun hw->is_idle = true;
333*4882a593Smuzhiyun return ret;
334*4882a593Smuzhiyun err:
335*4882a593Smuzhiyun rkispp_free_pool(hw);
336*4882a593Smuzhiyun return ret;
337*4882a593Smuzhiyun }
338*4882a593Smuzhiyun
/*
 * rkispp_queue_dmabuf - schedule the next ISP buffer onto the shared ISPP
 * hardware, or queue it for later when the hardware is busy.
 *
 * @dbufs == NULL signals completion of the current job (hardware becomes
 * idle). During shutdown the hardware is forced busy so nothing new starts.
 * Runs entirely under hw->buf_lock, including the work-event dispatch —
 * presumably rkispp_module_work_event tolerates being called with the
 * spinlock held and IRQs off; confirm.
 */
static void rkispp_queue_dmabuf(struct rkispp_hw_dev *hw, struct rkisp_ispp_buf *dbufs)
{
	struct list_head *list = &hw->list;
	struct rkispp_device *ispp;
	struct rkispp_stream_vdev *vdev;
	struct rkisp_ispp_buf *buf = NULL;
	unsigned long lock_flags = 0;
	u32 val;

	spin_lock_irqsave(&hw->buf_lock, lock_flags);
	if (!dbufs)
		hw->is_idle = true;
	if (hw->is_shutdown)
		hw->is_idle = false;
	if (dbufs && list_empty(list) && hw->is_idle) {
		/* ispp idle or handle same device */
		buf = dbufs;
	} else if (hw->is_idle && !list_empty(list)) {
		/* ispp idle and handle first buf in list */
		buf = list_first_entry(list,
				       struct rkisp_ispp_buf, list);
		list_del(&buf->list);
		/* the new buffer waits its turn behind the queued ones */
		if (dbufs)
			list_add_tail(&dbufs->list, list);
	} else if (dbufs) {
		/* new buf into queue wait for handle */
		list_add_tail(&dbufs->list, list);
	}

	if (buf) {
		hw->is_idle = false;
		hw->cur_dev_id = buf->index;
		ispp = hw->ispp[buf->index];
		vdev = &ispp->stream_vdev;
		/* kick the first enabled module in pipeline order: TNR > NR > FEC */
		val = (vdev->module_ens & ISPP_MODULE_TNR) ? ISPP_MODULE_TNR :
			((vdev->module_ens & ISPP_MODULE_NR) ? ISPP_MODULE_NR : ISPP_MODULE_FEC);
		vdev->stream_ops->rkispp_module_work_event(ispp, buf, NULL, val, false);
	}

	spin_unlock_irqrestore(&hw->buf_lock, lock_flags);
}
380*4882a593Smuzhiyun
/*
 * Dispatch a control event from an ISPP instance to its shared hardware:
 * stream refcounting, pool setup/teardown and buffer queueing.
 * Returns 0 (or the pool-init result); -EFAULT for unknown commands.
 */
int rkispp_event_handle(struct rkispp_device *ispp, u32 cmd, void *arg)
{
	struct rkispp_hw_dev *hw = ispp->hw_dev;

	switch (cmd) {
	case CMD_STREAM:
		/* arg is an int: non-zero = stream on, zero = stream off */
		if (*(int *)arg)
			atomic_inc(&hw->refcnt);
		else
			atomic_dec(&hw->refcnt);
		return 0;
	case CMD_INIT_POOL:
		return rkispp_init_pool(hw, arg);
	case CMD_FREE_POOL:
		rkispp_free_pool(hw);
		return 0;
	case CMD_QUEUE_DMABUF:
		rkispp_queue_dmabuf(hw, arg);
		return 0;
	default:
		return -EFAULT;
	}
}
408*4882a593Smuzhiyun
/*
 * rkispp_alloc_page_dummy_buf - build a dummy DMA target for the IOMMU case
 * by mapping one physical page repeatedly over the whole @size range, so
 * discarded hardware writes all land in the same page.
 *
 * On success the hw dummy_buf holds the sg table (mem_priv), the page
 * array and the mapped dma address. Returns 0 or a negative errno.
 */
static int rkispp_alloc_page_dummy_buf(struct rkispp_device *dev, u32 size)
{
	struct rkispp_hw_dev *hw = dev->hw_dev;
	struct rkispp_dummy_buffer *dummy_buf = &hw->dummy_buf;
	u32 i, n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;
	struct sg_table *sg = NULL;
	int ret = -ENOMEM;

	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 0);
	if (!page)
		goto err;

	/* every slot points at the same single page */
	pages = kvmalloc_array(n_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto free_page;
	for (i = 0; i < n_pages; i++)
		pages[i] = page;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg)
		goto free_pages;
	ret = sg_alloc_table_from_pages(sg, pages, n_pages, 0,
					n_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto free_sg;

	/* fix: dma_map_sg() returns 0 on failure — previously unchecked */
	ret = dma_map_sg(hw->dev, sg->sgl, sg->nents, DMA_BIDIRECTIONAL);
	if (ret <= 0) {
		ret = -EIO;
		goto free_sg_table;
	}
	dummy_buf->dma_addr = sg_dma_address(sg->sgl);
	dummy_buf->mem_priv = sg;
	dummy_buf->pages = pages;
	v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
		 "%s buf:0x%x map cnt:%d\n", __func__,
		 (u32)dummy_buf->dma_addr, ret);
	return 0;
free_sg_table:
	sg_free_table(sg);
free_sg:
	kfree(sg);
free_pages:
	kvfree(pages);
free_page:
	__free_pages(page, 0);
err:
	return ret;
}
453*4882a593Smuzhiyun
rkispp_free_page_dummy_buf(struct rkispp_device * dev)454*4882a593Smuzhiyun static void rkispp_free_page_dummy_buf(struct rkispp_device *dev)
455*4882a593Smuzhiyun {
456*4882a593Smuzhiyun struct rkispp_dummy_buffer *dummy_buf = &dev->hw_dev->dummy_buf;
457*4882a593Smuzhiyun struct sg_table *sg = dummy_buf->mem_priv;
458*4882a593Smuzhiyun
459*4882a593Smuzhiyun if (!sg)
460*4882a593Smuzhiyun return;
461*4882a593Smuzhiyun dma_unmap_sg(dev->hw_dev->dev, sg->sgl, sg->nents, DMA_BIDIRECTIONAL);
462*4882a593Smuzhiyun sg_free_table(sg);
463*4882a593Smuzhiyun kfree(sg);
464*4882a593Smuzhiyun __free_pages(dummy_buf->pages[0], 0);
465*4882a593Smuzhiyun kvfree(dummy_buf->pages);
466*4882a593Smuzhiyun dummy_buf->mem_priv = NULL;
467*4882a593Smuzhiyun dummy_buf->pages = NULL;
468*4882a593Smuzhiyun }
469*4882a593Smuzhiyun
/*
 * rkispp_alloc_common_dummy_buf - lazily allocate the shared dummy buffer
 * all ISPP instances use as a write target for unused outputs.
 *
 * Sized for the largest known input (fallback: the subdev output format)
 * at 2 bytes per pixel. With an IOMMU a single page is mapped repeatedly;
 * otherwise a real contiguous buffer is allocated. Serialized by
 * hw->dev_lock; returns 0 if the buffer already exists.
 */
int rkispp_alloc_common_dummy_buf(struct rkispp_device *dev)
{
	struct rkispp_hw_dev *hw = dev->hw_dev;
	struct rkispp_subdev *sdev = &dev->ispp_sdev;
	struct rkispp_dummy_buffer *dummy_buf = &hw->dummy_buf;
	u32 w = hw->max_in.w ? hw->max_in.w : sdev->out_fmt.width;
	u32 h = hw->max_in.h ? hw->max_in.h : sdev->out_fmt.height;
	u32 size = w * h * 2;	/* 2 bytes/pixel — presumably worst case; confirm */
	int ret = 0;

	mutex_lock(&hw->dev_lock);
	/* already allocated by another instance */
	if (dummy_buf->mem_priv)
		goto end;

	if (hw->is_mmu) {
		ret = rkispp_alloc_page_dummy_buf(dev, size);
		goto end;
	}

	dummy_buf->size = size;
	ret = rkispp_allow_buffer(dev, dummy_buf);
	if (!ret)
		v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
			 "%s buf:0x%x size:%d\n", __func__,
			 (u32)dummy_buf->dma_addr, dummy_buf->size);
end:
	if (ret < 0)
		v4l2_err(&dev->v4l2_dev, "%s failed:%d\n", __func__, ret);
	mutex_unlock(&hw->dev_lock);
	return ret;
}
501*4882a593Smuzhiyun
rkispp_free_common_dummy_buf(struct rkispp_device * dev)502*4882a593Smuzhiyun void rkispp_free_common_dummy_buf(struct rkispp_device *dev)
503*4882a593Smuzhiyun {
504*4882a593Smuzhiyun struct rkispp_hw_dev *hw = dev->hw_dev;
505*4882a593Smuzhiyun
506*4882a593Smuzhiyun mutex_lock(&hw->dev_lock);
507*4882a593Smuzhiyun if (atomic_read(&hw->refcnt) ||
508*4882a593Smuzhiyun atomic_read(&dev->stream_vdev.refcnt) > 1)
509*4882a593Smuzhiyun goto end;
510*4882a593Smuzhiyun if (hw->is_mmu)
511*4882a593Smuzhiyun rkispp_free_page_dummy_buf(dev);
512*4882a593Smuzhiyun else
513*4882a593Smuzhiyun rkispp_free_buffer(dev, &hw->dummy_buf);
514*4882a593Smuzhiyun end:
515*4882a593Smuzhiyun mutex_unlock(&hw->dev_lock);
516*4882a593Smuzhiyun }
517*4882a593Smuzhiyun
/*
 * rkispp_find_regbuf_by_id - locate the register-dump buffer belonging to
 * device @dev_id and frame @frame_id.
 *
 * On success *@free_buf points at the entry and 0 is returned; -EINVAL
 * when no ring exists, -ENODATA when no entry matches.
 */
int rkispp_find_regbuf_by_id(struct rkispp_device *ispp, struct rkisp_ispp_reg **free_buf,
			     u32 dev_id, u32 frame_id)
{
	struct rkispp_hw_dev *hw = ispp->hw_dev;
	struct rkisp_ispp_reg *reg_buf = hw->reg_buf;
	int i;

	*free_buf = NULL;
	if (!hw->reg_buf)
		return -EINVAL;

	for (i = 0; i < RKISP_ISPP_REGBUF_NUM; i++) {
		if (reg_buf[i].dev_id == dev_id && reg_buf[i].frame_id == frame_id) {
			/* fixed: source had mojibake "®_buf[i]" for "&reg_buf[i]" */
			*free_buf = &reg_buf[i];
			return 0;
		}
	}

	return -ENODATA;
}
542*4882a593Smuzhiyun
/*
 * rkispp_release_regbuf - recycle register-dump buffers of the same device
 * that are strictly older (by frame timestamp) than @freebuf.
 *
 * NOTE(review): @freebuf itself is not released here (its timestamp is not
 * strictly less than its own) — presumably the caller frees it separately
 * or it is recycled on a later call; confirm against callers.
 */
void rkispp_release_regbuf(struct rkispp_device *ispp, struct rkisp_ispp_reg *freebuf)
{
	struct rkispp_hw_dev *hw = ispp->hw_dev;
	struct rkisp_ispp_reg *reg_buf = hw->reg_buf;
	int i;

	if (!hw->reg_buf)
		return;

	for (i = 0; i < RKISP_ISPP_REGBUF_NUM; i++) {
		if (reg_buf[i].dev_id == freebuf->dev_id &&
		    reg_buf[i].frame_timestamp < freebuf->frame_timestamp) {
			reg_buf[i].frame_id = 0;
			reg_buf[i].stat = ISP_ISPP_FREE;
		}
	}
}
560*4882a593Smuzhiyun
rkispp_request_regbuf(struct rkispp_device * dev,struct rkisp_ispp_reg ** free_buf)561*4882a593Smuzhiyun void rkispp_request_regbuf(struct rkispp_device *dev, struct rkisp_ispp_reg **free_buf)
562*4882a593Smuzhiyun {
563*4882a593Smuzhiyun struct rkispp_hw_dev *hw = dev->hw_dev;
564*4882a593Smuzhiyun int ret;
565*4882a593Smuzhiyun
566*4882a593Smuzhiyun if (!hw->reg_buf) {
567*4882a593Smuzhiyun *free_buf = NULL;
568*4882a593Smuzhiyun return;
569*4882a593Smuzhiyun }
570*4882a593Smuzhiyun
571*4882a593Smuzhiyun ret = rkispp_find_regbuf_by_stat(hw, free_buf, ISP_ISPP_FREE);
572*4882a593Smuzhiyun if (!ret) {
573*4882a593Smuzhiyun (*free_buf)->stat = ISP_ISPP_INUSE;
574*4882a593Smuzhiyun }
575*4882a593Smuzhiyun }
576*4882a593Smuzhiyun
rkispp_is_reg_withstream_global(void)577*4882a593Smuzhiyun bool rkispp_is_reg_withstream_global(void)
578*4882a593Smuzhiyun {
579*4882a593Smuzhiyun return rkispp_reg_withstream;
580*4882a593Smuzhiyun }
581*4882a593Smuzhiyun
/*
 * rkispp_is_reg_withstream_local - does this video device match the name
 * configured for per-device register capture?
 *
 * NOTE(review): the memcmp length is strlen(node_name), so this is a
 * prefix match (true whenever node_name is a prefix of
 * rkispp_reg_withstream_video_name) — presumably intentional to tolerate
 * suffixes in the configured name; confirm, otherwise strcmp is wanted.
 */
bool rkispp_is_reg_withstream_local(struct device *dev)
{
	const char *node_name = dev_name(dev);

	if (!node_name)
		return false;

	if (!memcmp(rkispp_reg_withstream_video_name, node_name,
		    strlen(node_name)))
		return true;
	else
		return false;
}
595