1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd. */
3
4 #include <linux/clk.h>
5 #include <linux/delay.h>
6 #include <linux/pm_runtime.h>
7 #include <linux/slab.h>
8 #include <media/v4l2-common.h>
9 #include <media/v4l2-event.h>
10 #include <media/v4l2-fh.h>
11 #include <media/v4l2-ioctl.h>
12 #include <media/v4l2-mc.h>
13 #include <media/v4l2-subdev.h>
14 #include <media/videobuf2-dma-contig.h>
15 #include <media/videobuf2-dma-sg.h>
16 #include <linux/rk-isp1-config.h>
17 #include <uapi/linux/rk-video-format.h>
18
19 #include "dev.h"
20 #include "regs.h"
21
22
23 /*
24 * DDR->| |->MB------->DDR
25 * |->TNR->DDR->NR->SHARP->DDR->FEC->|->SCL0----->DDR
26 * ISP->| |->SCL1----->DDR
27 * |->SCL2----->DDR
28 */
29
30 static void rkispp_module_work_event(struct rkispp_device *dev,
31 void *buf_rd, void *buf_wr,
32 u32 module, bool is_isr);
33
/* Program the current luma (Y) plane base address register for @stream. */
static void set_y_addr(struct rkispp_stream *stream, u32 val)
{
	struct rkispp_device *ispp = stream->isppdev;

	rkispp_write(ispp, stream->config->reg.cur_y_base, val);
}
38
/* Program the current chroma (UV) plane base address register for @stream. */
static void set_uv_addr(struct rkispp_stream *stream, u32 val)
{
	struct rkispp_device *ispp = stream->isppdev;

	rkispp_write(ispp, stream->config->reg.cur_uv_base, val);
}
43
/*
 * hrtimer callback for "early frame done".
 *
 * Polls the NR (or FEC, when FEC is enabled) line counter and signals
 * frame completion for all active streams once at least wait_line lines
 * have been written, instead of waiting for the hardware frame-end irq.
 * While the module is still working below the threshold, the timer
 * re-arms itself with an estimate of the remaining time.
 */
static enum hrtimer_restart rkispp_frame_done_early(struct hrtimer *timer)
{
	struct rkispp_stream_vdev *vdev =
		container_of(timer, struct rkispp_stream_vdev, frame_qst);
	struct rkispp_stream *stream = &vdev->stream[0];
	struct rkispp_device *dev = stream->isppdev;
	void __iomem *base = dev->hw_dev->base_addr;
	bool is_fec_en = (vdev->module_ens & ISPP_MODULE_FEC);
	enum hrtimer_restart ret = HRTIMER_NORESTART;
	/* the hardware tile counter counts in units of 128 lines */
	u32 threshold = vdev->wait_line / 128;
	u32 tile, tile_mask, working, work_mask;
	u32 i, seq, ycnt, shift, time, max_time;
	u64 t, ns = ktime_get_ns();

	working = readl(base + RKISPP_CTRL_SYS_STATUS);
	tile = readl(base + RKISPP_CTRL_SYS_CTL_STA0);
	if (is_fec_en) {
		/* FEC is the last stage: watch the FEC busy/line-count fields */
		shift = 16;
		work_mask = FEC_WORKING;
		tile_mask = FEC_TILE_LINE_CNT_MASK;
		t = vdev->fec.dbg.timestamp;
		seq = vdev->fec.dbg.id;
		max_time = 6000000;	/* re-poll at most 6ms later */
	} else {
		shift = 8;
		work_mask = NR_WORKING;
		tile_mask = NR_TILE_LINE_CNT_MASK;
		t = vdev->nr.dbg.timestamp;
		seq = vdev->nr.dbg.id;
		max_time = 2000000;	/* re-poll at most 2ms later */
	}
	working &= work_mask;
	tile &= tile_mask;
	ycnt = tile >> shift;	/* lines written so far, 128-line units */
	time = (u32)(ns - t);	/* ns elapsed since module start (dbg stamp) */
	if (dev->ispp_sdev.state == ISPP_STOP) {
		/* pipeline stopping: disable early-done and let the timer die */
		vdev->is_done_early = false;
		goto end;
	} else if (working && ycnt < threshold) {
		/*
		 * Not enough lines yet: extrapolate remaining time from the
		 * observed line rate (plus 100us slack) and re-arm, capped
		 * at max_time.  Note "ns" is reused as the delay here.
		 */
		if (!ycnt)
			ns = max_time;
		else
			ns = time * (threshold - ycnt) / ycnt + 100 * 1000;
		if (ns > max_time)
			ns = max_time;
		hrtimer_forward(timer, timer->base->get_time(), ns_to_ktime(ns));
		ret = HRTIMER_RESTART;
	} else {
		v4l2_dbg(3, rkispp_debug, &stream->isppdev->v4l2_dev,
			 "%s seq:%d line:%d ycnt:%d time:%dus\n",
			 __func__, seq, vdev->wait_line, ycnt * 128, time / 1000);
		/* threshold reached (or module idle): finish active streams */
		for (i = 0; i < dev->stream_max; i++) {
			stream = &vdev->stream[i];
			if (!stream->streaming || !stream->is_cfg || stream->stopping)
				continue;
			rkispp_frame_end(stream, FRAME_WORK);
		}
	}
end:
	return ret;
}
105
update_mi(struct rkispp_stream * stream)106 static void update_mi(struct rkispp_stream *stream)
107 {
108 struct rkispp_device *dev = stream->isppdev;
109 struct rkispp_dummy_buffer *dummy_buf;
110 u32 val;
111
112 if (stream->curr_buf) {
113 val = stream->curr_buf->buff_addr[RKISPP_PLANE_Y];
114 set_y_addr(stream, val);
115 val = stream->curr_buf->buff_addr[RKISPP_PLANE_UV];
116 set_uv_addr(stream, val);
117 }
118
119 if (stream->type == STREAM_OUTPUT && !stream->curr_buf) {
120 dummy_buf = &dev->hw_dev->dummy_buf;
121 set_y_addr(stream, dummy_buf->dma_addr);
122 set_uv_addr(stream, dummy_buf->dma_addr);
123 }
124
125 v4l2_dbg(2, rkispp_debug, &stream->isppdev->v4l2_dev,
126 "%s stream:%d Y:0x%x UV:0x%x\n",
127 __func__, stream->id,
128 rkispp_read(dev, stream->config->reg.cur_y_base),
129 rkispp_read(dev, stream->config->reg.cur_uv_base));
130 }
131
is_en_done_early(struct rkispp_device * dev)132 static bool is_en_done_early(struct rkispp_device *dev)
133 {
134 u32 height = dev->ispp_sdev.out_fmt.height;
135 u32 line = dev->stream_vdev.wait_line;
136 bool en = false;
137
138 if (line) {
139 if (line > height - 128)
140 dev->stream_vdev.wait_line = height - 128;
141 en = true;
142 v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
143 "wait %d line to wake up frame\n", line);
144 }
145
146 return en;
147 }
148
/*
 * Release every TNR buffer and return in-flight read buffers to their
 * owner.  Read-list entries belong to the ISP side and are handed back
 * via the remote subdev's s_rx_buffer; write/report-list entries were
 * allocated locally (tnr_init_buf) and are freed here.
 */
static void tnr_free_buf(struct rkispp_device *dev)
{
	struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
	struct rkisp_ispp_buf *dbufs;
	struct list_head *list;
	int i;

	list = &vdev->tnr.list_rd;
	if (vdev->tnr.cur_rd) {
		list_add_tail(&vdev->tnr.cur_rd->list, list);
		/* nxt_rd may alias cur_rd; clear it to avoid a double add */
		if (vdev->tnr.nxt_rd == vdev->tnr.cur_rd)
			vdev->tnr.nxt_rd = NULL;
		vdev->tnr.cur_rd = NULL;
	}
	if (vdev->tnr.nxt_rd) {
		list_add_tail(&vdev->tnr.nxt_rd->list, list);
		vdev->tnr.nxt_rd = NULL;
	}
	/* read buffers go back to the ISP producer */
	while (!list_empty(list)) {
		dbufs = get_list_buf(list, true);
		v4l2_subdev_call(dev->ispp_sdev.remote_sd,
				 video, s_rx_buffer, dbufs, NULL);
	}

	list = &vdev->tnr.list_wr;
	if (vdev->tnr.cur_wr) {
		list_add_tail(&vdev->tnr.cur_wr->list, list);
		vdev->tnr.cur_wr = NULL;
	}
	/* write/report wrappers were kzalloc'ed locally */
	while (!list_empty(list)) {
		dbufs = get_list_buf(list, true);
		kfree(dbufs);
	}
	list = &vdev->tnr.list_rpt;
	while (!list_empty(list)) {
		dbufs = get_list_buf(list, true);
		kfree(dbufs);
	}

	/*
	 * Free every rkispp_dummy_buffer packed in tnr.buf, walking from
	 * the first member (iir) — assumes tnr.buf is laid out as a dense
	 * array of rkispp_dummy_buffer.
	 */
	for (i = 0; i < sizeof(vdev->tnr.buf) /
	     sizeof(struct rkispp_dummy_buffer); i++)
		rkispp_free_buffer(dev, &vdev->tnr.buf.iir + i);

	vdev->tnr.is_buf_init = false;
	vdev->tnr.is_trigger = false;
}
195
/*
 * Allocate the TNR working buffers.
 *
 * @pic_size:  bytes for one picture (write) buffer
 * @gain_size: bytes for one gain buffer (PAGE_ALIGNed per plane)
 *
 * Builds RKISPP_BUF_MAX (or 1 in ISP quick mode) write-buffer groups of
 * picture+gain pairs, plus the IIR buffer (quick mode only) and the
 * kernel-gain (gain_kg) buffer.  On any failure everything already
 * allocated is released via tnr_free_buf().
 *
 * Returns 0 on success or a negative errno.
 */
static int tnr_init_buf(struct rkispp_device *dev,
			u32 pic_size, u32 gain_size)
{
	struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
	struct rkisp_ispp_buf *dbufs;
	struct rkispp_dummy_buffer *buf;
	int i, j, ret, cnt = RKISPP_BUF_MAX;
	u32 buf_idx = 0;

	/* quick mode ping-pongs with the ISP, one write group suffices */
	if (dev->inp == INP_ISP && dev->isp_mode & ISP_ISPP_QUICK)
		cnt = 1;
	for (i = 0; i < cnt; i++) {
		dbufs = kzalloc(sizeof(*dbufs), GFP_KERNEL);
		if (!dbufs) {
			ret = -ENOMEM;
			goto err;
		}
		dbufs->is_isp = false;
		for (j = 0; j < GROUP_BUF_MAX; j++) {
			buf = &vdev->tnr.buf.wr[i][j];
			buf->is_need_dbuf = true;
			buf->is_need_dmafd = false;
			buf->is_need_vaddr = true;
			/* j==0 is the picture plane, j==1 the gain plane */
			buf->size = !j ? pic_size : PAGE_ALIGN(gain_size);
			buf->index = buf_idx++;
			ret = rkispp_allow_buffer(dev, buf);
			if (ret) {
				kfree(dbufs);
				goto err;
			}
			dbufs->dbuf[j] = buf->dbuf;
			dbufs->didx[j] = buf->index;
		}
		list_add_tail(&dbufs->list, &vdev->tnr.list_wr);
	}

	if (dev->inp == INP_ISP && dev->isp_mode & ISP_ISPP_QUICK) {
		/* IIR feedback buffer, only needed in quick mode */
		buf = &vdev->tnr.buf.iir;
		buf->size = pic_size;
		ret = rkispp_allow_buffer(dev, buf);
		if (ret < 0)
			goto err;
	}

	buf = &vdev->tnr.buf.gain_kg;
	buf->is_need_vaddr = true;
	buf->is_need_dbuf = true;
	buf->is_need_dmafd = false;
	buf->size = PAGE_ALIGN(gain_size * 4);
	buf->index = buf_idx++;
	ret = rkispp_allow_buffer(dev, buf);
	if (ret < 0)
		goto err;

	vdev->tnr.is_buf_init = true;
	return 0;
err:
	tnr_free_buf(dev);
	v4l2_err(&dev->v4l2_dev, "%s failed\n", __func__);
	return ret;
}
257
/*
 * Configure the TNR (temporal noise reduction) module: compute buffer
 * sizes from the input format, allocate working buffers, and program
 * the TNR base-address/stride/size registers.
 *
 * Returns 0 on success (including when TNR is disabled) or a negative
 * errno from buffer allocation.
 */
static int config_tnr(struct rkispp_device *dev)
{
	struct rkispp_hw_dev *hw = dev->hw_dev;
	struct rkispp_stream_vdev *vdev;
	struct rkispp_stream *stream = NULL;
	int ret, mult = 1;
	u32 width, height, fmt;
	u32 pic_size, gain_size;
	u32 addr_offs, w, h, val;
	u32 max_w, max_h;

	vdev = &dev->stream_vdev;
	vdev->tnr.is_end = true;
	vdev->tnr.is_3to1 =
		((vdev->module_ens & ISPP_MODULE_TNR_3TO1) ==
		 ISPP_MODULE_TNR_3TO1);
	if (!(vdev->module_ens & ISPP_MODULE_TNR))
		return 0;

	if (dev->inp == INP_DDR) {
		/* DDR input: read format from the input (II) stream */
		vdev->tnr.is_3to1 = false;
		stream = &vdev->stream[STREAM_II];
		fmt = stream->out_cap_fmt.wr_fmt;
	} else {
		fmt = dev->isp_mode & (FMT_YUV422 | FMT_FBC);
	}

	width = dev->ispp_sdev.in_fmt.width;
	height = dev->ispp_sdev.in_fmt.height;
	/* size buffers for the largest input the hw may see, if known */
	max_w = hw->max_in.w ? hw->max_in.w : width;
	max_h = hw->max_in.h ? hw->max_in.h : height;
	/* FBC tiles are 16x16: align dimensions, Y/UV offset is w*h/16 */
	w = (fmt & FMT_FBC) ? ALIGN(max_w, 16) : max_w;
	h = (fmt & FMT_FBC) ? ALIGN(max_h, 16) : max_h;
	addr_offs = (fmt & FMT_FBC) ? w * h >> 4 : w * h;
	/* YUV422 = 2 bytes/pixel, YUV420 = 1.5 bytes/pixel */
	pic_size = (fmt & FMT_YUV422) ? w * h * 2 : w * h * 3 >> 1;
	vdev->tnr.uv_offset = addr_offs;
	if (fmt & FMT_FBC)
		pic_size += w * h >> 4;	/* extra room for the FBC header */

	gain_size = ALIGN(width, 64) * ALIGN(height, 128) >> 4;
	if (fmt & FMT_YUYV)
		mult = 2;	/* packed YUYV doubles the line stride */

	if (vdev->module_ens & (ISPP_MODULE_NR | ISPP_MODULE_SHP)) {
		ret = tnr_init_buf(dev, pic_size, gain_size);
		if (ret)
			return ret;
		if (dev->inp == INP_ISP &&
		    dev->isp_mode & ISP_ISPP_QUICK) {
			rkispp_set_bits(dev, RKISPP_CTRL_QUICK,
					GLB_QUICK_MODE_MASK,
					GLB_QUICK_MODE(0));

			/* seed cur/nxt inputs from the shared ISP buffer pool */
			val = hw->pool[0].dma[GROUP_BUF_PIC];
			rkispp_write(dev, RKISPP_TNR_CUR_Y_BASE, val);
			rkispp_write(dev, RKISPP_TNR_CUR_UV_BASE, val + addr_offs);

			val = hw->pool[0].dma[GROUP_BUF_GAIN];
			rkispp_write(dev, RKISPP_TNR_GAIN_CUR_Y_BASE, val);

			if (vdev->tnr.is_3to1) {
				val = hw->pool[1].dma[GROUP_BUF_PIC];
				rkispp_write(dev, RKISPP_TNR_NXT_Y_BASE, val);
				rkispp_write(dev, RKISPP_TNR_NXT_UV_BASE, val + addr_offs);
				val = hw->pool[1].dma[GROUP_BUF_GAIN];
				rkispp_write(dev, RKISPP_TNR_GAIN_NXT_Y_BASE, val);
			}
		}

		val = vdev->tnr.buf.gain_kg.dma_addr;
		rkispp_write(dev, RKISPP_TNR_GAIN_KG_Y_BASE, val);

		val = vdev->tnr.buf.wr[0][GROUP_BUF_PIC].dma_addr;
		rkispp_write(dev, RKISPP_TNR_WR_Y_BASE, val);
		rkispp_write(dev, RKISPP_TNR_WR_UV_BASE, val + addr_offs);
		/* IIR falls back to the write buffer when no IIR buf exists */
		if (vdev->tnr.buf.iir.mem_priv)
			val = vdev->tnr.buf.iir.dma_addr;
		rkispp_write(dev, RKISPP_TNR_IIR_Y_BASE, val);
		rkispp_write(dev, RKISPP_TNR_IIR_UV_BASE, val + addr_offs);

		val = vdev->tnr.buf.wr[0][GROUP_BUF_GAIN].dma_addr;
		rkispp_write(dev, RKISPP_TNR_GAIN_WR_Y_BASE, val);

		rkispp_write(dev, RKISPP_TNR_WR_VIR_STRIDE, ALIGN(width * mult, 16) >> 2);
		rkispp_set_bits(dev, RKISPP_TNR_CTRL, FMT_WR_MASK, fmt << 4 | SW_TNR_1ST_FRM);
	}

	if (stream) {
		/* DDR input drives TNR directly: hook frame-end to TNR irq */
		stream->config->frame_end_id = TNR_INT;
		stream->config->reg.cur_y_base = RKISPP_TNR_CUR_Y_BASE;
		stream->config->reg.cur_uv_base = RKISPP_TNR_CUR_UV_BASE;
		stream->config->reg.cur_y_base_shd = RKISPP_TNR_CUR_Y_BASE_SHD;
		stream->config->reg.cur_uv_base_shd = RKISPP_TNR_CUR_UV_BASE_SHD;
	}

	rkispp_set_bits(dev, RKISPP_TNR_CTRL, FMT_RD_MASK, fmt);
	if (fmt & FMT_FBC) {
		/* FBC input is tiled: strides are unused, program zero */
		rkispp_write(dev, RKISPP_TNR_CUR_VIR_STRIDE, 0);
		rkispp_write(dev, RKISPP_TNR_IIR_VIR_STRIDE, 0);
		rkispp_write(dev, RKISPP_TNR_NXT_VIR_STRIDE, 0);
	} else {
		rkispp_write(dev, RKISPP_TNR_CUR_VIR_STRIDE, ALIGN(width * mult, 16) >> 2);
		rkispp_write(dev, RKISPP_TNR_IIR_VIR_STRIDE, ALIGN(width * mult, 16) >> 2);
		rkispp_write(dev, RKISPP_TNR_NXT_VIR_STRIDE, ALIGN(width * mult, 16) >> 2);
	}
	rkispp_set_bits(dev, RKISPP_TNR_CORE_CTRL, SW_TNR_MODE,
			vdev->tnr.is_3to1 ? SW_TNR_MODE : 0);
	rkispp_write(dev, RKISPP_TNR_GAIN_CUR_VIR_STRIDE, ALIGN(width, 64) >> 4);
	rkispp_write(dev, RKISPP_TNR_GAIN_NXT_VIR_STRIDE, ALIGN(width, 64) >> 4);
	rkispp_write(dev, RKISPP_TNR_GAIN_KG_VIR_STRIDE, ALIGN(width, 16) * 6);
	rkispp_write(dev, RKISPP_TNR_GAIN_WR_VIR_STRIDE, ALIGN(width, 64) >> 4);
	rkispp_write(dev, RKISPP_CTRL_TNR_SIZE, height << 16 | width);

	if (vdev->monitor.is_en) {
		init_completion(&vdev->monitor.tnr.cmpl);
		schedule_work(&vdev->monitor.tnr.work);
	}
	v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
		 "%s size:%dx%d ctrl:0x%x core_ctrl:0x%x\n",
		 __func__, width, height,
		 rkispp_read(dev, RKISPP_TNR_CTRL),
		 rkispp_read(dev, RKISPP_TNR_CORE_CTRL));
	return 0;
}
382
/*
 * Release all NR buffers.  Read buffers that came from the ISP are
 * returned via s_rx_buffer; TNR-originated read wrappers are freed.
 * Write/report lists hold locally-owned dummy buffers that are simply
 * drained and then freed in bulk from nr.buf below.
 */
static void nr_free_buf(struct rkispp_device *dev)
{
	struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
	struct rkisp_ispp_buf *dbufs;
	struct list_head *list;
	int i;

	list = &vdev->nr.list_rd;
	if (vdev->nr.cur_rd) {
		list_add_tail(&vdev->nr.cur_rd->list, list);
		vdev->nr.cur_rd = NULL;
	}
	while (!list_empty(list)) {
		dbufs = get_list_buf(list, true);
		if (dbufs->is_isp)
			/* owned by the ISP: hand it back */
			v4l2_subdev_call(dev->ispp_sdev.remote_sd,
					 video, s_rx_buffer, dbufs, NULL);
		else
			kfree(dbufs);
	}

	list = &vdev->nr.list_wr;
	if (vdev->nr.cur_wr)
		vdev->nr.cur_wr = NULL;
	while (!list_empty(list))
		get_list_buf(list, false);
	list = &vdev->nr.list_rpt;
	while (!list_empty(list))
		get_list_buf(list, false);

	/*
	 * Free every rkispp_dummy_buffer packed in nr.buf, walking from
	 * its first member (tmp_yuv) — assumes nr.buf is laid out as a
	 * dense array of rkispp_dummy_buffer.
	 */
	for (i = 0; i < sizeof(vdev->nr.buf) /
	     sizeof(struct rkispp_dummy_buffer); i++)
		rkispp_free_buffer(dev, &vdev->nr.buf.tmp_yuv + i);

	vdev->nr.is_buf_init = false;
}
419
/*
 * Allocate the NR write buffers (NR->FEC intermediates) plus the
 * tmp_yuv scratch buffer used by SHARP.
 *
 * @size: bytes per write buffer (one full picture).
 *
 * The write-buffer count depends on the FEC mode; with FEC fully
 * disabled no intermediates are needed.  On failure all buffers are
 * released via nr_free_buf().  Returns 0 or a negative errno.
 */
static int nr_init_buf(struct rkispp_device *dev, u32 size)
{
	struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
	struct rkispp_dummy_buffer *buf;
	int i, ret, cnt;

	switch (vdev->module_ens & ISPP_MODULE_FEC_ST) {
	case ISPP_MODULE_FEC_ST:
		cnt = RKISPP_FEC_BUF_MAX;
		break;
	case ISPP_MODULE_FEC:
		cnt = RKISPP_BUF_MAX;
		break;
	default:
		cnt = 0;
	}

	for (i = 0; i < cnt; i++) {
		buf = &vdev->nr.buf.wr[i];
		buf->size = size;
		buf->index = i;
		buf->is_need_dbuf = true;
		buf->is_need_vaddr = true;
		buf->is_need_dmafd = false;
		ret = rkispp_allow_buffer(dev, buf);
		if (ret)
			goto err;
		list_add_tail(&buf->list, &vdev->nr.list_wr);
	}

	/* scratch buffer: 42 lines of 32-byte columns across the width */
	buf = &vdev->nr.buf.tmp_yuv;
	cnt = DIV_ROUND_UP(dev->ispp_sdev.in_fmt.width, 32);
	buf->size = PAGE_ALIGN(cnt * 42 * 32);
	ret = rkispp_allow_buffer(dev, buf);
	if (ret)
		goto err;

	vdev->nr.is_buf_init = true;
	return 0;
err:
	nr_free_buf(dev);
	v4l2_err(&dev->v4l2_dev, "%s failed\n", __func__);
	return ret;
}
464
/*
 * Configure the NR (noise reduction) and SHARP modules: compute sizes
 * from the input format, allocate intermediates, wire the NR input to
 * TNR/ISP/DDR depending on topology, and point the SHARP output at the
 * FEC input (or a stream/dummy buffer when FEC is off).
 *
 * Returns 0 on success (including when NR and SHARP are both disabled)
 * or a negative errno from buffer allocation.
 */
static int config_nr_shp(struct rkispp_device *dev)
{
	struct rkispp_hw_dev *hw = dev->hw_dev;
	struct rkispp_stream_vdev *vdev;
	struct rkispp_stream *stream = NULL;
	u32 width, height, fmt;
	u32 pic_size, addr_offs;
	u32 w, h, val;
	u32 max_w, max_h;
	int ret, mult = 1;

	vdev = &dev->stream_vdev;
	vdev->nr.is_end = true;
	if (!(vdev->module_ens & (ISPP_MODULE_NR | ISPP_MODULE_SHP)))
		return 0;

	vdev->is_done_early = is_en_done_early(dev);

	if (dev->inp == INP_DDR) {
		/* DDR input: read format from the input (II) stream */
		stream = &vdev->stream[STREAM_II];
		fmt = stream->out_cap_fmt.wr_fmt;
	} else {
		fmt = dev->isp_mode & (FMT_YUV422 | FMT_FBC);
	}

	width = dev->ispp_sdev.in_fmt.width;
	height = dev->ispp_sdev.in_fmt.height;
	w = width;
	h = height;
	/* size for the largest input the shared hw may see, if known */
	max_w = hw->max_in.w ? hw->max_in.w : w;
	max_h = hw->max_in.h ? hw->max_in.h : h;
	if (fmt & FMT_FBC) {
		/* FBC tiles are 16x16 */
		max_w = ALIGN(max_w, 16);
		max_h = ALIGN(max_h, 16);
		w = ALIGN(w, 16);
		h = ALIGN(h, 16);
	}
	addr_offs = (fmt & FMT_FBC) ? max_w * max_h >> 4 : max_w * max_h;
	/* YUV422 = 2 bytes/pixel, YUV420 = 1.5 bytes/pixel */
	pic_size = (fmt & FMT_YUV422) ? w * h * 2 : w * h * 3 >> 1;
	vdev->nr.uv_offset = addr_offs;

	if (fmt & FMT_YUYV)
		mult = 2;	/* packed YUYV doubles the line stride */

	ret = nr_init_buf(dev, pic_size);
	if (ret)
		return ret;

	if (vdev->module_ens & ISPP_MODULE_TNR) {
		/* NR reads what TNR wrote */
		rkispp_write(dev, RKISPP_NR_ADDR_BASE_Y,
			     rkispp_read(dev, RKISPP_TNR_WR_Y_BASE));
		rkispp_write(dev, RKISPP_NR_ADDR_BASE_UV,
			     rkispp_read(dev, RKISPP_TNR_WR_UV_BASE));
		rkispp_write(dev, RKISPP_NR_ADDR_BASE_GAIN,
			     rkispp_read(dev, RKISPP_TNR_GAIN_WR_Y_BASE));
		rkispp_set_bits(dev, RKISPP_CTRL_QUICK, 0, GLB_NR_SD32_TNR);
	} else {
		/* tnr need to set same format with nr in the fbc mode */
		rkispp_set_bits(dev, RKISPP_TNR_CTRL, FMT_RD_MASK, fmt);
		rkispp_write(dev, RKISPP_CTRL_TNR_SIZE, height << 16 | width);
		if (dev->inp == INP_ISP) {
			if (dev->isp_mode & ISP_ISPP_QUICK)
				rkispp_set_bits(dev, RKISPP_CTRL_QUICK,
						GLB_QUICK_MODE_MASK,
						GLB_QUICK_MODE(2));
			else
				rkispp_set_bits(dev, RKISPP_NR_UVNR_CTRL_PARA,
						0, SW_UVNR_SD32_SELF_EN);

			/* NR reads straight from the shared ISP buffer pool */
			val = hw->pool[0].dma[GROUP_BUF_PIC];
			rkispp_write(dev, RKISPP_NR_ADDR_BASE_Y, val);
			rkispp_write(dev, RKISPP_NR_ADDR_BASE_UV, val + addr_offs);
			val = hw->pool[0].dma[GROUP_BUF_GAIN];
			rkispp_write(dev, RKISPP_NR_ADDR_BASE_GAIN, val);
			rkispp_clear_bits(dev, RKISPP_CTRL_QUICK, GLB_NR_SD32_TNR);
		} else if (stream) {
			/* DDR input drives NR directly via the II stream */
			stream->config->frame_end_id = NR_INT;
			stream->config->reg.cur_y_base = RKISPP_NR_ADDR_BASE_Y;
			stream->config->reg.cur_uv_base = RKISPP_NR_ADDR_BASE_UV;
			stream->config->reg.cur_y_base_shd = RKISPP_NR_ADDR_BASE_Y_SHD;
			stream->config->reg.cur_uv_base_shd = RKISPP_NR_ADDR_BASE_UV_SHD;
		}
	}

	rkispp_clear_bits(dev, RKISPP_CTRL_QUICK, GLB_FEC2SCL_EN);
	if (vdev->module_ens & ISPP_MODULE_FEC) {
		/* SHARP writes an intermediate that FEC reads back */
		addr_offs = width * height;
		vdev->fec.uv_offset = addr_offs;
		val = vdev->nr.buf.wr[0].dma_addr;
		rkispp_write(dev, RKISPP_SHARP_WR_Y_BASE, val);
		rkispp_write(dev, RKISPP_SHARP_WR_UV_BASE, val + addr_offs);
		rkispp_write(dev, RKISPP_SHARP_WR_VIR_STRIDE, ALIGN(width * mult, 16) >> 2);
		rkispp_set_bits(dev, RKISPP_SHARP_CTRL, SW_SHP_WR_FORMAT_MASK, fmt & (~FMT_FBC));

		rkispp_write(dev, RKISPP_FEC_RD_Y_BASE, val);
		rkispp_write(dev, RKISPP_FEC_RD_UV_BASE, val + addr_offs);
	} else {
		/* no FEC: MB stream consumes SHARP, else write to dummy */
		stream = &vdev->stream[STREAM_MB];
		if (!stream->streaming) {
			val = hw->dummy_buf.dma_addr;
			rkispp_write(dev, RKISPP_SHARP_WR_Y_BASE, val);
			rkispp_write(dev, RKISPP_SHARP_WR_UV_BASE, val);
			rkispp_write(dev, RKISPP_SHARP_WR_VIR_STRIDE, ALIGN(width * mult, 16) >> 2);
			if (dev->inp == INP_ISP)
				rkispp_set_bits(dev, RKISPP_SHARP_CTRL,
						SW_SHP_WR_FORMAT_MASK, FMT_FBC);
		}
	}

	val = vdev->nr.buf.tmp_yuv.dma_addr;
	rkispp_write(dev, RKISPP_SHARP_TMP_YUV_BASE, val);

	/* fix to use new nr algorithm */
	rkispp_set_bits(dev, RKISPP_NR_CTRL, NR_NEW_ALGO, NR_NEW_ALGO);
	rkispp_set_bits(dev, RKISPP_NR_CTRL, FMT_RD_MASK, fmt);
	if (fmt & FMT_FBC) {
		rkispp_write(dev, RKISPP_NR_VIR_STRIDE, 0);
		rkispp_write(dev, RKISPP_FBC_VIR_HEIGHT, max_h);
	} else {
		rkispp_write(dev, RKISPP_NR_VIR_STRIDE, ALIGN(width * mult, 16) >> 2);
	}
	rkispp_write(dev, RKISPP_NR_VIR_STRIDE_GAIN, ALIGN(width, 64) >> 4);
	rkispp_write(dev, RKISPP_CTRL_SIZE, height << 16 | width);

	if (vdev->monitor.is_en) {
		init_completion(&vdev->monitor.nr.cmpl);
		schedule_work(&vdev->monitor.nr.work);
	}
	v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
		 "%s size:%dx%d\n"
		 "nr ctrl:0x%x ctrl_para:0x%x\n"
		 "shp ctrl:0x%x core_ctrl:0x%x\n",
		 __func__, width, height,
		 rkispp_read(dev, RKISPP_NR_CTRL),
		 rkispp_read(dev, RKISPP_NR_UVNR_CTRL_PARA),
		 rkispp_read(dev, RKISPP_SHARP_CTRL),
		 rkispp_read(dev, RKISPP_SHARP_CORE_CTRL));
	return 0;
}
604
fec_free_buf(struct rkispp_device * dev)605 static void fec_free_buf(struct rkispp_device *dev)
606 {
607 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
608 struct list_head *list = &vdev->fec.list_rd;
609
610 if (vdev->fec.cur_rd)
611 vdev->fec.cur_rd = NULL;
612 while (!list_empty(list))
613 get_list_buf(list, false);
614 }
615
/*
 * Configure the FEC (fisheye/distortion correction) module: wire its
 * read side to the SHARP output (or to the II stream when FEC is the
 * first stage), and program stride and picture-size registers.
 *
 * Always returns 0 (kept int for symmetry with the other config_*).
 */
static int config_fec(struct rkispp_device *dev)
{
	struct rkispp_stream_vdev *vdev;
	struct rkispp_stream *stream = NULL;
	u32 in_width, in_height, fmt, mult = 1;
	u32 out_width, out_height;

	vdev = &dev->stream_vdev;
	vdev->fec.is_end = true;
	if (!(vdev->module_ens & ISPP_MODULE_FEC))
		return 0;

	if (dev->inp == INP_DDR) {
		/* DDR input: read format from the input (II) stream */
		stream = &vdev->stream[STREAM_II];
		fmt = stream->out_cap_fmt.wr_fmt;
	} else {
		fmt = dev->isp_mode & FMT_YUV422;
	}

	in_width = dev->ispp_sdev.in_fmt.width;
	in_height = dev->ispp_sdev.in_fmt.height;
	out_width = dev->ispp_sdev.out_fmt.width;
	out_height = dev->ispp_sdev.out_fmt.height;

	if (vdev->module_ens & (ISPP_MODULE_NR | ISPP_MODULE_SHP)) {
		/* FEC reads the SHARP intermediate */
		rkispp_write(dev, RKISPP_FEC_RD_Y_BASE,
			     rkispp_read(dev, RKISPP_SHARP_WR_Y_BASE));
		rkispp_write(dev, RKISPP_FEC_RD_UV_BASE,
			     rkispp_read(dev, RKISPP_SHARP_WR_UV_BASE));
	} else if (stream) {
		/* FEC is the first stage: II stream feeds it directly */
		stream->config->frame_end_id = FEC_INT;
		stream->config->reg.cur_y_base = RKISPP_FEC_RD_Y_BASE;
		stream->config->reg.cur_uv_base = RKISPP_FEC_RD_UV_BASE;
		stream->config->reg.cur_y_base_shd = RKISPP_FEC_RD_Y_BASE_SHD;
		stream->config->reg.cur_uv_base_shd = RKISPP_FEC_RD_UV_BASE_SHD;
	}

	if (fmt & FMT_YUYV)
		mult = 2;	/* packed YUYV doubles the line stride */
	rkispp_set_bits(dev, RKISPP_FEC_CTRL, FMT_RD_MASK, fmt);
	rkispp_write(dev, RKISPP_FEC_RD_VIR_STRIDE, ALIGN(in_width * mult, 16) >> 2);
	rkispp_write(dev, RKISPP_FEC_PIC_SIZE, out_height << 16 | out_width);
	rkispp_set_bits(dev, RKISPP_CTRL_QUICK, 0, GLB_FEC2SCL_EN);

	if (vdev->monitor.is_en) {
		init_completion(&vdev->monitor.fec.cmpl);
		schedule_work(&vdev->monitor.fec.work);
	}
	v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
		 "%s size:%dx%d->%dx%d ctrl:0x%x core_ctrl:0x%x\n",
		 __func__, in_width, in_height, out_width, out_height,
		 rkispp_read(dev, RKISPP_FEC_CTRL),
		 rkispp_read(dev, RKISPP_FEC_CORE_CTRL));
	return 0;
}
671
config_modules(struct rkispp_device * dev)672 static int config_modules(struct rkispp_device *dev)
673 {
674 struct rkispp_params_vdev *params_vdev;
675 int ret;
676
677 v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
678 "stream module ens:0x%x\n", dev->stream_vdev.module_ens);
679 dev->stream_vdev.monitor.monitoring_module = 0;
680 dev->stream_vdev.monitor.restart_module = 0;
681 dev->stream_vdev.monitor.is_restart = false;
682 dev->stream_vdev.monitor.retry = 0;
683 dev->stream_vdev.monitor.is_en = rkispp_monitor;
684 init_completion(&dev->stream_vdev.monitor.cmpl);
685
686 ret = config_tnr(dev);
687 if (ret < 0)
688 return ret;
689
690 ret = config_nr_shp(dev);
691 if (ret < 0)
692 goto free_tnr;
693
694 ret = config_fec(dev);
695 if (ret < 0)
696 goto free_nr;
697
698 /* config default params */
699 params_vdev = &dev->params_vdev[PARAM_VDEV_TNR];
700 params_vdev->params_ops->rkispp_params_cfg(params_vdev, 0);
701 params_vdev = &dev->params_vdev[PARAM_VDEV_NR];
702 params_vdev->params_ops->rkispp_params_cfg(params_vdev, 0);
703 params_vdev = &dev->params_vdev[PARAM_VDEV_FEC];
704 params_vdev->params_ops->rkispp_params_cfg(params_vdev, 0);
705 return 0;
706 free_nr:
707 nr_free_buf(dev);
708 free_tnr:
709 tnr_free_buf(dev);
710 return ret;
711 }
712
rkispp_destroy_buf(struct rkispp_stream * stream)713 static void rkispp_destroy_buf(struct rkispp_stream *stream)
714 {
715 struct rkispp_device *dev = stream->isppdev;
716 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
717
718 if (atomic_read(&vdev->refcnt) == 1) {
719 vdev->irq_ends = 0;
720 tnr_free_buf(dev);
721 nr_free_buf(dev);
722 fec_free_buf(dev);
723 rkispp_event_handle(dev, CMD_FREE_POOL, NULL);
724 }
725 }
726
727
nr_work_event(struct rkispp_device * dev,struct rkisp_ispp_buf * buf_rd,struct rkispp_dummy_buffer * buf_wr,bool is_isr)728 static void nr_work_event(struct rkispp_device *dev,
729 struct rkisp_ispp_buf *buf_rd,
730 struct rkispp_dummy_buffer *buf_wr,
731 bool is_isr)
732 {
733 struct rkispp_params_vdev *params_vdev = &dev->params_vdev[PARAM_VDEV_NR];
734 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
735 struct rkispp_stream *stream = &vdev->stream[STREAM_II];
736 struct rkispp_monitor *monitor = &vdev->monitor;
737 void __iomem *base = dev->hw_dev->base_addr;
738 struct rkispp_dummy_buffer *buf_to_fec = NULL;
739 struct rkispp_dummy_buffer *dummy;
740 struct rkispp_buffer *inbuf;
741 struct v4l2_subdev *sd = NULL;
742 struct list_head *list;
743 struct dma_buf *dbuf;
744 unsigned long lock_flags = 0, lock_flags1 = 0;
745 bool is_start = false, is_quick = false, is_fec_event = false;
746 bool is_fec_en = (vdev->module_ens & ISPP_MODULE_FEC);
747 struct rkisp_ispp_reg *reg_buf = NULL;
748 u32 val;
749
750 if (!(vdev->module_ens & (ISPP_MODULE_NR | ISPP_MODULE_SHP)))
751 return;
752
753 if (dev->inp == INP_ISP) {
754 if (dev->isp_mode & ISP_ISPP_QUICK)
755 is_quick = true;
756 else
757 sd = dev->ispp_sdev.remote_sd;
758 }
759
760 spin_lock_irqsave(&vdev->nr.buf_lock, lock_flags);
761
762 /* event from nr frame end */
763 if (!buf_rd && !buf_wr && is_isr) {
764 vdev->nr.is_end = true;
765 is_fec_event = true;
766 if (vdev->nr.cur_rd) {
767 /* nr read buf return to isp or tnr */
768 if (vdev->nr.cur_rd->is_isp && sd) {
769 v4l2_subdev_call(sd, video, s_rx_buffer, vdev->nr.cur_rd, NULL);
770 } else if (!vdev->nr.cur_rd->priv) {
771 rkispp_module_work_event(dev, NULL, vdev->nr.cur_rd,
772 ISPP_MODULE_TNR, is_isr);
773 } else if (stream->streaming && vdev->nr.cur_rd->priv) {
774 inbuf = vdev->nr.cur_rd->priv;
775 vb2_buffer_done(&inbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
776 }
777 vdev->nr.cur_rd = NULL;
778 }
779
780 if (vdev->nr.cur_wr) {
781 /* nr write buf to fec */
782 buf_to_fec = vdev->nr.cur_wr;
783 vdev->nr.cur_wr = NULL;
784
785 if (vdev->is_done_early && !dev->hw_dev->is_first)
786 buf_to_fec = NULL;
787 }
788 }
789
790 if (!vdev->fec.is_end) {
791 if (buf_rd)
792 list_add_tail(&buf_rd->list, &vdev->nr.list_rd);
793 goto end;
794 }
795
796 spin_lock_irqsave(&monitor->lock, lock_flags1);
797 if (monitor->is_restart) {
798 if (buf_rd)
799 list_add_tail(&buf_rd->list, &vdev->nr.list_rd);
800 if (buf_wr)
801 list_add_tail(&buf_wr->list, &vdev->nr.list_wr);
802 goto restart_unlock;
803 }
804
805 list = &vdev->nr.list_rd;
806 if (buf_rd && vdev->nr.is_end && list_empty(list)) {
807 /* nr read buf from isp or tnr */
808 vdev->nr.cur_rd = buf_rd;
809 } else if (vdev->nr.is_end && !list_empty(list)) {
810 /* nr read buf from list
811 * nr processing slow than isp or tnr
812 * new read buf from isp or tnr into list
813 */
814 vdev->nr.cur_rd = get_list_buf(list, true);
815 if (buf_rd)
816 list_add_tail(&buf_rd->list, list);
817 } else if (!vdev->nr.is_end && buf_rd) {
818 /* nr no idle
819 * new read buf from isp or tnr into list
820 */
821 list_add_tail(&buf_rd->list, list);
822 }
823
824 list = &vdev->nr.list_wr;
825 if (vdev->nr.is_end && !vdev->nr.cur_wr) {
826 /* nr idle, get new write buf */
827 vdev->nr.cur_wr = buf_wr ? buf_wr :
828 get_list_buf(list, false);
829 } else if (buf_wr) {
830 /* tnr no idle, write buf from nr into list */
831 list_add_tail(&buf_wr->list, list);
832 }
833
834 if (vdev->nr.cur_rd && vdev->nr.is_end) {
835 if (vdev->nr.cur_rd->priv) {
836 inbuf = vdev->nr.cur_rd->priv;
837 val = inbuf->buff_addr[RKISPP_PLANE_Y];
838 rkispp_write(dev, RKISPP_NR_ADDR_BASE_Y, val);
839 val = inbuf->buff_addr[RKISPP_PLANE_UV];
840 rkispp_write(dev, RKISPP_NR_ADDR_BASE_UV, val);
841 } else if (!vdev->nr.cur_rd->is_isp) {
842 u32 size = sizeof(vdev->tnr.buf) / sizeof(*dummy);
843
844 dbuf = vdev->nr.cur_rd->dbuf[GROUP_BUF_PIC];
845 dummy = dbuf_to_dummy(dbuf, &vdev->tnr.buf.iir, size);
846 val = dummy->dma_addr;
847 rkispp_write(dev, RKISPP_NR_ADDR_BASE_Y, val);
848 val += vdev->nr.uv_offset;
849 rkispp_write(dev, RKISPP_NR_ADDR_BASE_UV, val);
850
851 dbuf = vdev->nr.cur_rd->dbuf[GROUP_BUF_GAIN];
852 dummy = dbuf_to_dummy(dbuf, &vdev->tnr.buf.iir, size);
853 val = dummy->dma_addr;
854 rkispp_write(dev, RKISPP_NR_ADDR_BASE_GAIN, val);
855 } else {
856 struct rkispp_isp_buf_pool *buf;
857
858 buf = get_pool_buf(dev, vdev->nr.cur_rd);
859 val = buf->dma[GROUP_BUF_PIC];
860 rkispp_write(dev, RKISPP_NR_ADDR_BASE_Y, val);
861 val += vdev->nr.uv_offset;
862 rkispp_write(dev, RKISPP_NR_ADDR_BASE_UV, val);
863
864 val = buf->dma[GROUP_BUF_GAIN];
865 rkispp_write(dev, RKISPP_NR_ADDR_BASE_GAIN, val);
866 }
867 is_start = true;
868 }
869
870 if (vdev->nr.is_end && is_quick)
871 is_start = true;
872
873 if (vdev->nr.cur_wr && is_start) {
874 dummy = vdev->nr.cur_wr;
875 val = dummy->dma_addr;
876 rkispp_write(dev, RKISPP_SHARP_WR_Y_BASE, val);
877 val += vdev->fec.uv_offset;
878 rkispp_write(dev, RKISPP_SHARP_WR_UV_BASE, val);
879 }
880
881 if (is_start) {
882 u32 seq = 0;
883 u64 timestamp = 0;
884
885 if (vdev->nr.cur_rd) {
886 seq = vdev->nr.cur_rd->frame_id;
887 timestamp = vdev->nr.cur_rd->frame_timestamp;
888 if (vdev->nr.cur_wr) {
889 vdev->nr.cur_wr->id = seq;
890 vdev->nr.cur_wr->timestamp = timestamp;
891 } else {
892 vdev->nr.buf.wr[0].id = seq;
893 vdev->nr.buf.wr[0].timestamp = timestamp;
894 }
895 if (!is_fec_en && !is_quick) {
896 dev->ispp_sdev.frame_timestamp = timestamp;
897 dev->ispp_sdev.frm_sync_seq = seq;
898 }
899 dev->stats_vdev[STATS_VDEV_NR].frame_id = seq;
900 params_vdev->params_ops->rkispp_params_cfg(params_vdev, seq);
901 }
902
903 /* check MB config and output buf beforce start, when MB connect to SHARP
904 * MB update by OTHER_FORCE_UPD
905 */
906 stream = &vdev->stream[STREAM_MB];
907 if (!is_fec_en && stream->streaming) {
908 if (!stream->is_cfg) {
909 secure_config_mb(stream);
910 } else if (!stream->curr_buf) {
911 get_stream_buf(stream);
912 if (stream->curr_buf)
913 vdev->stream_ops->update_mi(stream);
914 }
915 }
916
917 /* check SCL output buf beforce start
918 * SCL update by OTHER_FORCE_UPD
919 */
920 for (val = STREAM_S0; val <= STREAM_S2; val++) {
921 stream = &vdev->stream[val];
922 if (!stream->streaming || !stream->is_cfg || stream->curr_buf)
923 continue;
924 get_stream_buf(stream);
925 if (stream->curr_buf) {
926 vdev->stream_ops->update_mi(stream);
927 rkispp_set_bits(dev, stream->config->reg.ctrl, 0, SW_SCL_ENABLE);
928 } else {
929 rkispp_clear_bits(dev, stream->config->reg.ctrl, SW_SCL_ENABLE);
930 }
931 }
932
933 if (!dev->hw_dev->is_single) {
934 if (vdev->nr.cur_rd &&
935 (vdev->nr.cur_rd->is_isp || vdev->nr.cur_rd->priv)) {
936 rkispp_update_regs(dev, RKISPP_CTRL, RKISPP_TNR_CTRL);
937 writel(TNR_FORCE_UPD, base + RKISPP_CTRL_UPDATE);
938 }
939 rkispp_update_regs(dev, RKISPP_NR, RKISPP_ORB_MAX_FEATURE);
940 }
941
942 writel(OTHER_FORCE_UPD, base + RKISPP_CTRL_UPDATE);
943
944 val = readl(base + RKISPP_SHARP_CORE_CTRL);
945 if (!(val & SW_SHP_EN) && !is_fec_en && !stream->streaming)
946 writel(val | SW_SHP_DMA_DIS, base + RKISPP_SHARP_CORE_CTRL);
947 else if (val & SW_SHP_EN)
948 writel(val & ~SW_SHP_DMA_DIS, base + RKISPP_SHARP_CORE_CTRL);
949
950 v4l2_dbg(3, rkispp_debug, &dev->v4l2_dev,
951 "NR start seq:%d | Y_SHD rd:0x%x wr:0x%x\n",
952 seq, readl(base + RKISPP_NR_ADDR_BASE_Y_SHD),
953 readl(base + RKISPP_SHARP_WR_Y_BASE_SHD));
954
955 for (val = STREAM_S0; val <= STREAM_S2 && !is_fec_en; val++) {
956 stream = &vdev->stream[val];
957 /* check scale stream stop state */
958 if (stream->streaming && stream->stopping) {
959 if (stream->ops->is_stopped(stream)) {
960 stream->stopping = false;
961 stream->streaming = false;
962 wake_up(&stream->done);
963 } else {
964 stream->ops->stop(stream);
965 }
966 }
967 }
968
969 vdev->nr.dbg.id = seq;
970 vdev->nr.dbg.timestamp = ktime_get_ns();
971 if (monitor->is_en) {
972 monitor->nr.time = vdev->nr.dbg.interval / 1000 / 1000;
973 monitor->monitoring_module |= MONITOR_NR;
974 monitor->nr.is_err = false;
975 if (!completion_done(&monitor->nr.cmpl))
976 complete(&monitor->nr.cmpl);
977 }
978
979 if (rkispp_is_reg_withstream_global())
980 rkispp_find_regbuf_by_id(dev, ®_buf, dev->dev_id, seq);
981 if (reg_buf && (rkispp_debug_reg & ISPP_MODULE_NR)) {
982 u32 offset, size;
983
984 offset = reg_buf->reg_size;
985 size = 4 + RKISPP_NR_BUFFER_READY - RKISPP_NR_CTRL;
986 reg_buf->ispp_size[ISPP_ID_NR] = size;
987 reg_buf->ispp_offset[ISPP_ID_NR] = offset;
988 memcpy_fromio(®_buf->reg[offset], base + RKISPP_NR_CTRL, size);
989
990 offset += size;
991 reg_buf->reg_size = offset;
992 }
993 if (reg_buf && (rkispp_debug_reg & ISPP_MODULE_SHP)) {
994 u32 offset, size;
995
996 offset = reg_buf->reg_size;
997 size = 4 + RKISPP_SHARP_GRAD_RATIO - RKISPP_SHARP_CTRL;
998 reg_buf->ispp_size[ISPP_ID_SHP] = size;
999 reg_buf->ispp_offset[ISPP_ID_SHP] = offset;
1000 memcpy_fromio(®_buf->reg[offset], base + RKISPP_SHARP_CTRL, size);
1001
1002 offset += size;
1003 reg_buf->reg_size = offset;
1004 }
1005 if (reg_buf && (rkispp_debug_reg & ISPP_MODULE_ORB)) {
1006 u32 offset, size;
1007
1008 offset = reg_buf->reg_size;
1009 size = 4 + RKISPP_ORB_MAX_FEATURE - RKISPP_ORB_WR_BASE;
1010 reg_buf->ispp_size[ISPP_ID_ORB] = size;
1011 reg_buf->ispp_offset[ISPP_ID_ORB] = offset;
1012 memcpy_fromio(®_buf->reg[offset], base + RKISPP_ORB_WR_BASE, size);
1013
1014 offset += size;
1015 reg_buf->reg_size = offset;
1016 }
1017
1018 if (!is_quick && !dev->hw_dev->is_shutdown) {
1019 writel(NR_SHP_ST, base + RKISPP_CTRL_STRT);
1020
1021 if (!is_fec_en && vdev->is_done_early)
1022 hrtimer_start(&vdev->frame_qst,
1023 ns_to_ktime(1000000),
1024 HRTIMER_MODE_REL);
1025 }
1026 vdev->nr.is_end = false;
1027 }
1028 restart_unlock:
1029 spin_unlock_irqrestore(&monitor->lock, lock_flags1);
1030 end:
1031 /* nr_shp->fec->scl
1032 * fec start working should after nr
1033 * for scl will update by OTHER_FORCE_UPD
1034 */
1035 if (buf_to_fec) {
1036 if ((vdev->module_ens & ISPP_MODULE_FEC_ST) == ISPP_MODULE_FEC_ST) {
1037 rkispp_finish_buffer(dev, buf_to_fec);
1038 list_add_tail(&buf_to_fec->list, &dev->stream_vdev.nr.list_rpt);
1039 buf_to_fec = NULL;
1040 }
1041 rkispp_module_work_event(dev, buf_to_fec, NULL, ISPP_MODULE_FEC, false);
1042 } else if (!list_empty(&vdev->fec.list_rd) && is_fec_event) {
1043 rkispp_module_work_event(dev, NULL, NULL, ISPP_MODULE_FEC, false);
1044 }
1045 spin_unlock_irqrestore(&vdev->nr.buf_lock, lock_flags);
1046
1047 if (is_fec_en && vdev->is_done_early &&
1048 is_start && !dev->hw_dev->is_first &&
1049 (vdev->module_ens & ISPP_MODULE_FEC_ST) != ISPP_MODULE_FEC_ST)
1050 hrtimer_start(&vdev->fec_qst,
1051 ns_to_ktime(1000000),
1052 HRTIMER_MODE_REL);
1053 }
1054
tnr_work_event(struct rkispp_device * dev,struct rkisp_ispp_buf * buf_rd,struct rkisp_ispp_buf * buf_wr,bool is_isr)1055 static void tnr_work_event(struct rkispp_device *dev,
1056 struct rkisp_ispp_buf *buf_rd,
1057 struct rkisp_ispp_buf *buf_wr,
1058 bool is_isr)
1059 {
1060 struct rkispp_params_vdev *params_vdev = &dev->params_vdev[PARAM_VDEV_TNR];
1061 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1062 struct rkispp_stream *stream = &vdev->stream[STREAM_II];
1063 struct rkispp_monitor *monitor = &vdev->monitor;
1064 void __iomem *base = dev->hw_dev->base_addr;
1065 struct rkispp_dummy_buffer *dummy;
1066 struct rkispp_buffer *inbuf;
1067 struct v4l2_subdev *sd = NULL;
1068 struct list_head *list;
1069 struct dma_buf *dbuf;
1070 unsigned long lock_flags = 0, lock_flags1 = 0;
1071 u32 val, size = sizeof(vdev->tnr.buf) / sizeof(*dummy);
1072 bool is_en, is_3to1 = vdev->tnr.is_3to1, is_start = false;
1073 struct rkisp_ispp_reg *reg_buf = NULL;
1074
1075 if (!(vdev->module_ens & ISPP_MODULE_TNR) ||
1076 (dev->inp == INP_ISP && dev->isp_mode & ISP_ISPP_QUICK))
1077 return;
1078
1079 if (dev->inp == INP_ISP)
1080 sd = dev->ispp_sdev.remote_sd;
1081
1082 if (buf_rd)
1083 params_vdev->params_ops->rkispp_params_cfg(params_vdev, buf_rd->frame_id);
1084
1085 is_en = rkispp_read(dev, RKISPP_TNR_CORE_CTRL) & SW_TNR_EN;
1086
1087 spin_lock_irqsave(&vdev->tnr.buf_lock, lock_flags);
1088
1089 /* event from tnr frame end */
1090 if (!buf_rd && !buf_wr && is_isr) {
1091 vdev->tnr.is_end = true;
1092
1093 if (vdev->tnr.cur_rd) {
1094 /* tnr read buf return to isp */
1095 if (sd) {
1096 v4l2_subdev_call(sd, video, s_rx_buffer, vdev->tnr.cur_rd, NULL);
1097 } else if (stream->streaming && vdev->tnr.cur_rd->priv) {
1098 inbuf = vdev->tnr.cur_rd->priv;
1099 vb2_buffer_done(&inbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
1100 }
1101 if (vdev->tnr.cur_rd == vdev->tnr.nxt_rd)
1102 vdev->tnr.nxt_rd = NULL;
1103 vdev->tnr.cur_rd = NULL;
1104 }
1105
1106 if (vdev->tnr.cur_wr) {
1107 if (!vdev->tnr.cur_wr->is_move_judge || !vdev->tnr.is_trigger) {
1108 /* tnr write buf to nr */
1109 rkispp_module_work_event(dev, vdev->tnr.cur_wr, NULL,
1110 ISPP_MODULE_NR, is_isr);
1111 } else {
1112 dbuf = vdev->tnr.cur_wr->dbuf[GROUP_BUF_GAIN];
1113 dummy = dbuf_to_dummy(dbuf, &vdev->tnr.buf.iir, size);
1114 rkispp_finish_buffer(dev, dummy);
1115 rkispp_finish_buffer(dev, &vdev->tnr.buf.gain_kg);
1116 list_add_tail(&vdev->tnr.cur_wr->list, &vdev->tnr.list_rpt);
1117 }
1118 vdev->tnr.cur_wr = NULL;
1119 }
1120 }
1121
1122 if (!is_en) {
1123 if (buf_wr)
1124 list_add_tail(&buf_wr->list, &vdev->tnr.list_wr);
1125
1126 if (vdev->tnr.nxt_rd) {
1127 if (sd) {
1128 v4l2_subdev_call(sd, video, s_rx_buffer,
1129 vdev->tnr.nxt_rd, NULL);
1130 } else if (stream->streaming && vdev->tnr.nxt_rd->priv) {
1131 inbuf = vdev->tnr.nxt_rd->priv;
1132 vb2_buffer_done(&inbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
1133 }
1134 vdev->tnr.nxt_rd = NULL;
1135 }
1136 list = &vdev->tnr.list_rd;
1137 while (!list_empty(list)) {
1138 struct rkisp_ispp_buf *buf = get_list_buf(list, true);
1139
1140 rkispp_module_work_event(dev, buf, NULL,
1141 ISPP_MODULE_NR, is_isr);
1142 }
1143 if (buf_rd)
1144 rkispp_module_work_event(dev, buf_rd, NULL,
1145 ISPP_MODULE_NR, is_isr);
1146 goto end;
1147 }
1148
1149 spin_lock_irqsave(&monitor->lock, lock_flags1);
1150 if (monitor->is_restart) {
1151 if (buf_rd)
1152 list_add_tail(&buf_rd->list, &vdev->tnr.list_rd);
1153 if (buf_wr)
1154 list_add_tail(&buf_wr->list, &vdev->tnr.list_wr);
1155 goto restart_unlock;
1156 }
1157
1158 list = &vdev->tnr.list_rd;
1159 if (buf_rd && vdev->tnr.is_end && list_empty(list)) {
1160 /* tnr read buf from isp */
1161 vdev->tnr.cur_rd = vdev->tnr.nxt_rd;
1162 vdev->tnr.nxt_rd = buf_rd;
1163 /* first buf for 3to1 using twice */
1164 if (!is_3to1 ||
1165 (rkispp_read(dev, RKISPP_TNR_CTRL) & SW_TNR_1ST_FRM))
1166 vdev->tnr.cur_rd = vdev->tnr.nxt_rd;
1167 } else if (vdev->tnr.is_end && !list_empty(list)) {
1168 /* tnr read buf from list
1169 * tnr processing slow than isp
1170 * new read buf from isp into list
1171 */
1172 vdev->tnr.cur_rd = vdev->tnr.nxt_rd;
1173 vdev->tnr.nxt_rd = get_list_buf(list, true);
1174 if (!is_3to1)
1175 vdev->tnr.cur_rd = vdev->tnr.nxt_rd;
1176
1177 if (buf_rd)
1178 list_add_tail(&buf_rd->list, list);
1179 } else if (!vdev->tnr.is_end && buf_rd) {
1180 /* tnr no idle
1181 * new read buf from isp into list
1182 */
1183 list_add_tail(&buf_rd->list, list);
1184 }
1185
1186 list = &vdev->tnr.list_wr;
1187 if (vdev->tnr.is_end && !vdev->tnr.cur_wr) {
1188 /* tnr idle, get new write buf */
1189 vdev->tnr.cur_wr =
1190 buf_wr ? buf_wr : get_list_buf(list, true);
1191 } else if (buf_wr) {
1192 /* tnr no idle, write buf from nr into list */
1193 list_add_tail(&buf_wr->list, list);
1194 }
1195
1196 if (vdev->tnr.cur_rd && vdev->tnr.nxt_rd && vdev->tnr.is_end) {
1197 if (vdev->tnr.cur_rd->priv) {
1198 inbuf = vdev->tnr.cur_rd->priv;
1199 val = inbuf->buff_addr[RKISPP_PLANE_Y];
1200 rkispp_write(dev, RKISPP_TNR_CUR_Y_BASE, val);
1201 val = inbuf->buff_addr[RKISPP_PLANE_UV];
1202 rkispp_write(dev, RKISPP_TNR_CUR_UV_BASE, val);
1203 } else {
1204 struct rkispp_isp_buf_pool *buf;
1205
1206 buf = get_pool_buf(dev, vdev->tnr.cur_rd);
1207 val = buf->dma[GROUP_BUF_PIC];
1208 rkispp_write(dev, RKISPP_TNR_CUR_Y_BASE, val);
1209 val += vdev->tnr.uv_offset;
1210 rkispp_write(dev, RKISPP_TNR_CUR_UV_BASE, val);
1211
1212 val = buf->dma[GROUP_BUF_GAIN];
1213 rkispp_write(dev, RKISPP_TNR_GAIN_CUR_Y_BASE, val);
1214 if (is_3to1) {
1215 buf = get_pool_buf(dev, vdev->tnr.nxt_rd);
1216 val = buf->dma[GROUP_BUF_PIC];
1217 rkispp_write(dev, RKISPP_TNR_NXT_Y_BASE, val);
1218 val += vdev->tnr.uv_offset;
1219 rkispp_write(dev, RKISPP_TNR_NXT_UV_BASE, val);
1220
1221 val = buf->dma[GROUP_BUF_GAIN];
1222 rkispp_write(dev, RKISPP_TNR_GAIN_NXT_Y_BASE, val);
1223
1224 if (rkispp_read(dev, RKISPP_TNR_CTRL) & SW_TNR_1ST_FRM)
1225 vdev->tnr.cur_rd = NULL;
1226 }
1227 }
1228 is_start = true;
1229 }
1230
1231 if (vdev->tnr.cur_wr && is_start) {
1232 dbuf = vdev->tnr.cur_wr->dbuf[GROUP_BUF_PIC];
1233 dummy = dbuf_to_dummy(dbuf, &vdev->tnr.buf.iir, size);
1234 val = dummy->dma_addr;
1235 rkispp_write(dev, RKISPP_TNR_WR_Y_BASE, val);
1236 val += vdev->tnr.uv_offset;
1237 rkispp_write(dev, RKISPP_TNR_WR_UV_BASE, val);
1238
1239 dbuf = vdev->tnr.cur_wr->dbuf[GROUP_BUF_GAIN];
1240 dummy = dbuf_to_dummy(dbuf, &vdev->tnr.buf.iir, size);
1241 val = dummy->dma_addr;
1242 rkispp_write(dev, RKISPP_TNR_GAIN_WR_Y_BASE, val);
1243 }
1244
1245 if (is_start) {
1246 u32 seq = 0;
1247
1248 if (vdev->tnr.nxt_rd) {
1249 seq = vdev->tnr.nxt_rd->frame_id;
1250 if (vdev->tnr.cur_wr) {
1251 vdev->tnr.cur_wr->frame_id = seq;
1252 vdev->tnr.cur_wr->frame_timestamp =
1253 vdev->tnr.nxt_rd->frame_timestamp;
1254 vdev->tnr.cur_wr->is_move_judge =
1255 vdev->tnr.nxt_rd->is_move_judge;
1256 }
1257 dev->stats_vdev[STATS_VDEV_TNR].frame_id = seq;
1258 }
1259
1260 if (!dev->hw_dev->is_single)
1261 rkispp_update_regs(dev, RKISPP_CTRL, RKISPP_TNR_CORE_WEIGHT);
1262 writel(TNR_FORCE_UPD, base + RKISPP_CTRL_UPDATE);
1263
1264 v4l2_dbg(3, rkispp_debug, &dev->v4l2_dev,
1265 "TNR start seq:%d | Y_SHD nxt:0x%x cur:0x%x iir:0x%x wr:0x%x\n",
1266 seq, readl(base + RKISPP_TNR_NXT_Y_BASE_SHD),
1267 readl(base + RKISPP_TNR_CUR_Y_BASE_SHD),
1268 readl(base + RKISPP_TNR_IIR_Y_BASE_SHD),
1269 readl(base + RKISPP_TNR_WR_Y_BASE_SHD));
1270
1271 /* iir using previous tnr write frame */
1272 rkispp_write(dev, RKISPP_TNR_IIR_Y_BASE,
1273 rkispp_read(dev, RKISPP_TNR_WR_Y_BASE));
1274 rkispp_write(dev, RKISPP_TNR_IIR_UV_BASE,
1275 rkispp_read(dev, RKISPP_TNR_WR_UV_BASE));
1276
1277 rkispp_prepare_buffer(dev, &vdev->tnr.buf.gain_kg);
1278
1279 vdev->tnr.dbg.id = seq;
1280 vdev->tnr.dbg.timestamp = ktime_get_ns();
1281 if (monitor->is_en) {
1282 monitor->tnr.time = vdev->tnr.dbg.interval / 1000 / 1000;
1283 monitor->monitoring_module |= MONITOR_TNR;
1284 monitor->tnr.is_err = false;
1285 if (!completion_done(&monitor->tnr.cmpl))
1286 complete(&monitor->tnr.cmpl);
1287 }
1288
1289 if (rkispp_is_reg_withstream_global())
1290 rkispp_find_regbuf_by_id(dev, ®_buf, dev->dev_id, seq);
1291 if (reg_buf && (rkispp_debug_reg & ISPP_MODULE_TNR)) {
1292 u32 offset, size;
1293
1294 offset = reg_buf->reg_size;
1295 size = 4 + RKISPP_TNR_STATE - RKISPP_TNR_CTRL;
1296 reg_buf->ispp_size[ISPP_ID_TNR] = size;
1297 reg_buf->ispp_offset[ISPP_ID_TNR] = offset;
1298 memcpy_fromio(®_buf->reg[offset], base + RKISPP_TNR_CTRL, size);
1299
1300 offset += size;
1301 reg_buf->reg_size = offset;
1302 }
1303
1304 if (!dev->hw_dev->is_shutdown)
1305 writel(TNR_ST, base + RKISPP_CTRL_STRT);
1306 vdev->tnr.is_end = false;
1307 }
1308
1309 restart_unlock:
1310 spin_unlock_irqrestore(&monitor->lock, lock_flags1);
1311 end:
1312 spin_unlock_irqrestore(&vdev->tnr.buf_lock, lock_flags);
1313 }
1314
fec_work_event(struct rkispp_device * dev,void * buff_rd,bool is_isr,bool is_quick)1315 static void fec_work_event(struct rkispp_device *dev,
1316 void *buff_rd,
1317 bool is_isr, bool is_quick)
1318 {
1319 struct rkispp_params_vdev *params_vdev = &dev->params_vdev[PARAM_VDEV_FEC];
1320 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1321 struct rkispp_monitor *monitor = &vdev->monitor;
1322 struct list_head *list = &vdev->fec.list_rd;
1323 void __iomem *base = dev->hw_dev->base_addr;
1324 struct rkispp_dummy_buffer *dummy;
1325 struct rkispp_stream *stream;
1326 unsigned long lock_flags = 0, lock_flags1 = 0;
1327 bool is_start = false;
1328 struct rkisp_ispp_reg *reg_buf = NULL;
1329 u32 val;
1330 struct rkispp_dummy_buffer *buf_rd = buff_rd;
1331
1332 if (!(vdev->module_ens & ISPP_MODULE_FEC))
1333 return;
1334
1335 spin_lock_irqsave(&vdev->fec.buf_lock, lock_flags);
1336
1337 /* event from fec frame end */
1338 if (!buf_rd && is_isr) {
1339 vdev->fec.is_end = true;
1340
1341 if (vdev->fec.dummy_cur_rd || vdev->is_done_early)
1342 rkispp_module_work_event(dev, NULL, vdev->fec.dummy_cur_rd,
1343 ISPP_MODULE_NR, false);
1344 vdev->fec.dummy_cur_rd = NULL;
1345 }
1346
1347 spin_lock_irqsave(&monitor->lock, lock_flags1);
1348 if (monitor->is_restart && buf_rd) {
1349 list_add_tail(&buf_rd->list, list);
1350 goto restart_unlock;
1351 }
1352
1353 if (buf_rd && vdev->fec.is_end && list_empty(list)) {
1354 /* fec read buf from nr */
1355 vdev->fec.dummy_cur_rd = buf_rd;
1356 } else if (vdev->fec.is_end && !list_empty(list)) {
1357 /* fec read buf from list
1358 * fec processing slow than nr
1359 * new read buf from nr into list
1360 */
1361 vdev->fec.dummy_cur_rd = get_list_buf(list, false);
1362 if (buf_rd)
1363 list_add_tail(&buf_rd->list, list);
1364 } else if (!vdev->fec.is_end && buf_rd) {
1365 /* fec no idle
1366 * new read buf from nr into list
1367 */
1368 list_add_tail(&buf_rd->list, list);
1369 }
1370
1371 if (vdev->fec.dummy_cur_rd && vdev->fec.is_end) {
1372 dummy = vdev->fec.dummy_cur_rd;
1373 val = dummy->dma_addr;
1374 rkispp_write(dev, RKISPP_FEC_RD_Y_BASE, val);
1375 val += vdev->fec.uv_offset;
1376 rkispp_write(dev, RKISPP_FEC_RD_UV_BASE, val);
1377 is_start = true;
1378 }
1379
1380 if (is_start || is_quick) {
1381 u32 seq = 0;
1382
1383 if (vdev->fec.dummy_cur_rd) {
1384 seq = vdev->fec.dummy_cur_rd->id;
1385 dev->ispp_sdev.frame_timestamp =
1386 vdev->fec.dummy_cur_rd->timestamp;
1387 dev->ispp_sdev.frm_sync_seq = seq;
1388 params_vdev->params_ops->rkispp_params_cfg(params_vdev, seq);
1389 } else {
1390 seq = vdev->nr.buf.wr[0].id;
1391 dev->ispp_sdev.frame_timestamp =
1392 vdev->nr.buf.wr[0].timestamp;
1393 dev->ispp_sdev.frm_sync_seq = seq;
1394 }
1395
1396 /* check MB config and output buf beforce start, when MB connect to FEC
1397 * MB update by FEC_FORCE_UPD
1398 */
1399 stream = &vdev->stream[STREAM_MB];
1400 if (stream->streaming) {
1401 if (!stream->is_cfg) {
1402 secure_config_mb(stream);
1403 } else if (!stream->curr_buf) {
1404 get_stream_buf(stream);
1405 if (stream->curr_buf)
1406 update_mi(stream);
1407 }
1408 }
1409
1410 if (!dev->hw_dev->is_single) {
1411 rkispp_update_regs(dev, RKISPP_FEC, RKISPP_FEC_CROP);
1412 rkispp_update_regs(dev, RKISPP_SCL0, RKISPP_SCL2_FACTOR);
1413 }
1414
1415 writel(FEC_FORCE_UPD, base + RKISPP_CTRL_UPDATE);
1416 if (vdev->nr.is_end) {
1417 if (!dev->hw_dev->is_single)
1418 rkispp_update_regs(dev, RKISPP_SCL0_CTRL, RKISPP_SCL2_FACTOR);
1419 writel(OTHER_FORCE_UPD, base + RKISPP_CTRL_UPDATE);
1420 /* check scale stream stop state */
1421 for (val = STREAM_S0; val <= STREAM_S2; val++) {
1422 stream = &vdev->stream[val];
1423 if (stream->streaming && stream->stopping) {
1424 if (stream->ops->is_stopped(stream)) {
1425 stream->stopping = false;
1426 stream->streaming = false;
1427 wake_up(&stream->done);
1428 } else {
1429 stream->ops->stop(stream);
1430 }
1431 }
1432 }
1433 }
1434 v4l2_dbg(3, rkispp_debug, &dev->v4l2_dev,
1435 "FEC start seq:%d | Y_SHD rd:0x%x\n"
1436 "\txint:0x%x xfra:0x%x yint:0x%x yfra:0x%x\n",
1437 seq, readl(base + RKISPP_FEC_RD_Y_BASE_SHD),
1438 readl(base + RKISPP_FEC_MESH_XINT_BASE_SHD),
1439 readl(base + RKISPP_FEC_MESH_XFRA_BASE_SHD),
1440 readl(base + RKISPP_FEC_MESH_YINT_BASE_SHD),
1441 readl(base + RKISPP_FEC_MESH_YFRA_BASE_SHD));
1442
1443 vdev->fec.dbg.id = seq;
1444 vdev->fec.dbg.timestamp = ktime_get_ns();
1445 if (monitor->is_en) {
1446 monitor->fec.time = vdev->fec.dbg.interval / 1000 / 1000;
1447 monitor->monitoring_module |= MONITOR_FEC;
1448 if (!completion_done(&monitor->fec.cmpl))
1449 complete(&monitor->fec.cmpl);
1450 }
1451
1452 if (rkispp_is_reg_withstream_global())
1453 rkispp_find_regbuf_by_id(dev, ®_buf, dev->dev_id, seq);
1454 if (reg_buf && (rkispp_debug_reg & ISPP_MODULE_FEC)) {
1455 u32 offset, size;
1456
1457 offset = reg_buf->reg_size;
1458 size = 4 + RKISPP_FEC_CROP - RKISPP_FEC_CTRL;
1459 reg_buf->ispp_size[ISPP_ID_FEC] = size;
1460 reg_buf->ispp_offset[ISPP_ID_FEC] = offset;
1461 memcpy_fromio(®_buf->reg[offset], base + RKISPP_FEC_CTRL, size);
1462
1463 offset += size;
1464 reg_buf->reg_size = offset;
1465 }
1466
1467 if (!dev->hw_dev->is_shutdown) {
1468 writel(FEC_ST, base + RKISPP_CTRL_STRT);
1469
1470 if (vdev->is_done_early)
1471 hrtimer_start(&vdev->frame_qst,
1472 ns_to_ktime(5000000),
1473 HRTIMER_MODE_REL);
1474 }
1475 vdev->fec.is_end = false;
1476 }
1477 restart_unlock:
1478 spin_unlock_irqrestore(&monitor->lock, lock_flags1);
1479 spin_unlock_irqrestore(&vdev->fec.buf_lock, lock_flags);
1480 }
1481
1482
rkispp_set_trigger_mode(struct rkispp_device * dev,struct rkispp_trigger_mode * mode)1483 void rkispp_set_trigger_mode(struct rkispp_device *dev,
1484 struct rkispp_trigger_mode *mode)
1485 {
1486 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1487
1488 if (mode->module & ISPP_MODULE_TNR)
1489 vdev->tnr.is_trigger = mode->on;
1490 }
1491
rkispp_get_tnrbuf_fd(struct rkispp_device * dev,struct rkispp_buf_idxfd * idxfd)1492 int rkispp_get_tnrbuf_fd(struct rkispp_device *dev, struct rkispp_buf_idxfd *idxfd)
1493 {
1494 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1495 struct rkisp_ispp_buf *dbufs;
1496 struct rkispp_dummy_buffer *buf;
1497 unsigned long lock_flags = 0;
1498 int j, buf_idx, ret = 0;
1499
1500 spin_lock_irqsave(&vdev->tnr.buf_lock, lock_flags);
1501 if (!vdev->tnr.is_buf_init) {
1502 spin_unlock_irqrestore(&vdev->tnr.buf_lock, lock_flags);
1503 ret = -EAGAIN;
1504 return ret;
1505 }
1506 spin_unlock_irqrestore(&vdev->tnr.buf_lock, lock_flags);
1507
1508 buf_idx = 0;
1509 list_for_each_entry(dbufs, &vdev->tnr.list_wr, list) {
1510 for (j = 0; j < GROUP_BUF_MAX; j++) {
1511 dbufs->dfd[j] = dma_buf_fd(dbufs->dbuf[j], O_CLOEXEC);
1512 get_dma_buf(dbufs->dbuf[j]);
1513 idxfd->index[buf_idx] = dbufs->didx[j];
1514 idxfd->dmafd[buf_idx] = dbufs->dfd[j];
1515 buf_idx++;
1516 }
1517 }
1518
1519 list_for_each_entry(dbufs, &vdev->tnr.list_rpt, list) {
1520 for (j = 0; j < GROUP_BUF_MAX; j++) {
1521 dbufs->dfd[j] = dma_buf_fd(dbufs->dbuf[j], O_CLOEXEC);
1522 get_dma_buf(dbufs->dbuf[j]);
1523 idxfd->index[buf_idx] = dbufs->didx[j];
1524 idxfd->dmafd[buf_idx] = dbufs->dfd[j];
1525 buf_idx++;
1526 }
1527 }
1528
1529 if (vdev->tnr.cur_wr) {
1530 for (j = 0; j < GROUP_BUF_MAX; j++) {
1531 vdev->tnr.cur_wr->dfd[j] = dma_buf_fd(vdev->tnr.cur_wr->dbuf[j], O_CLOEXEC);
1532 get_dma_buf(vdev->tnr.cur_wr->dbuf[j]);
1533 idxfd->index[buf_idx] = vdev->tnr.cur_wr->didx[j];
1534 idxfd->dmafd[buf_idx] = vdev->tnr.cur_wr->dfd[j];
1535 buf_idx++;
1536 }
1537 }
1538
1539 buf = &vdev->tnr.buf.gain_kg;
1540 buf->dma_fd = dma_buf_fd(buf->dbuf, O_CLOEXEC);
1541 get_dma_buf(buf->dbuf);
1542 idxfd->index[buf_idx] = buf->index;
1543 idxfd->dmafd[buf_idx] = buf->dma_fd;
1544 buf_idx++;
1545
1546 idxfd->buf_num = buf_idx;
1547
1548 return ret;
1549 }
1550
rkispp_get_nrbuf_fd(struct rkispp_device * dev,struct rkispp_buf_idxfd * idxfd)1551 int rkispp_get_nrbuf_fd(struct rkispp_device *dev, struct rkispp_buf_idxfd *idxfd)
1552 {
1553 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1554 struct rkispp_dummy_buffer *buf;
1555 unsigned long lock_flags = 0;
1556 int i, ret = 0;
1557
1558 spin_lock_irqsave(&vdev->nr.buf_lock, lock_flags);
1559 if (!vdev->nr.is_buf_init) {
1560 spin_unlock_irqrestore(&vdev->nr.buf_lock, lock_flags);
1561 ret = -EAGAIN;
1562 return ret;
1563 }
1564 spin_unlock_irqrestore(&vdev->nr.buf_lock, lock_flags);
1565
1566 for (i = 0; i < RKISPP_FEC_BUF_MAX; i++) {
1567 buf = &vdev->nr.buf.wr[i];
1568 if (!buf->dbuf)
1569 break;
1570 buf->dma_fd = dma_buf_fd(buf->dbuf, O_CLOEXEC);
1571 get_dma_buf(buf->dbuf);
1572 idxfd->index[i] = i;
1573 idxfd->dmafd[i] = buf->dma_fd;
1574 }
1575 idxfd->buf_num = i;
1576 return ret;
1577 }
1578
rkispp_module_work_event(struct rkispp_device * dev,void * buf_rd,void * buf_wr,u32 module,bool is_isr)1579 static void rkispp_module_work_event(struct rkispp_device *dev,
1580 void *buf_rd, void *buf_wr,
1581 u32 module, bool is_isr)
1582 {
1583 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1584 bool is_fec_en = !!(vdev->module_ens & ISPP_MODULE_FEC);
1585 bool is_single = dev->hw_dev->is_single;
1586 //bool is_early = vdev->is_done_early;
1587
1588 if (dev->hw_dev->is_shutdown)
1589 return;
1590
1591 if (dev->ispp_sdev.state != ISPP_STOP) {
1592 if (module & ISPP_MODULE_TNR)
1593 tnr_work_event(dev, buf_rd, buf_wr, is_isr);
1594 else if (module & ISPP_MODULE_NR)
1595 nr_work_event(dev, buf_rd, buf_wr, is_isr);
1596 else
1597 fec_work_event(dev, buf_rd, is_isr, false);
1598 }
1599
1600 /*
1601 * ispp frame done to do next conditions
1602 * mulit dev: cur frame (tnr->nr->fec) done for next frame
1603 * 1.single dev: fec async with tnr, and sync with nr:
1604 * { f0 }
1605 * tnr->nr->fec->|
1606 * |->tnr->nr->fec
1607 * { f1 }
1608 * 2.single dev and early mode:
1609 * { f0 } { f1 } { f2 }
1610 * tnr->nr->tnr->nr->tnr->nr
1611 * |->fec->||->fec->|
1612 * { f0 }{ f1 }
1613 * 3.single fec
1614 *
1615 */
1616 if (is_isr && !buf_rd && !buf_wr &&
1617 ((!is_fec_en && module == ISPP_MODULE_NR) ||
1618 (is_fec_en &&
1619 ((module == ISPP_MODULE_NR && (is_single || vdev->fec.is_end)) ||
1620 (module == ISPP_MODULE_FEC && !is_single && vdev->fec.is_end))))) {
1621 dev->stream_vdev.monitor.retry = 0;
1622 rkispp_soft_reset(dev->hw_dev);
1623 rkispp_event_handle(dev, CMD_QUEUE_DMABUF, NULL);
1624 }
1625
1626 if (dev->ispp_sdev.state == ISPP_STOP) {
1627 if ((module & (ISPP_MODULE_TNR | ISPP_MODULE_NR)) && buf_rd) {
1628 struct rkisp_ispp_buf *buf = buf_rd;
1629
1630 if (buf->is_isp)
1631 v4l2_subdev_call(dev->ispp_sdev.remote_sd,
1632 video, s_rx_buffer, buf, NULL);
1633 }
1634 if (!dev->hw_dev->is_idle)
1635 dev->hw_dev->is_idle = true;
1636 }
1637 }
1638
start_isp(struct rkispp_device * dev)1639 static int start_isp(struct rkispp_device *dev)
1640 {
1641 struct rkispp_subdev *ispp_sdev = &dev->ispp_sdev;
1642 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1643 struct rkispp_stream *stream;
1644 struct rkisp_ispp_mode mode;
1645 int i, ret;
1646
1647 if (dev->inp != INP_ISP || ispp_sdev->state)
1648 return 0;
1649
1650 if (dev->stream_sync) {
1651 /* output stream enable then start isp */
1652 for (i = STREAM_MB; i <= STREAM_S2; i++) {
1653 stream = &vdev->stream[i];
1654 if (stream->linked && !stream->streaming)
1655 return 0;
1656 }
1657 } else if (atomic_read(&vdev->refcnt) > 1) {
1658 return 0;
1659 }
1660
1661 rkispp_start_3a_run(dev);
1662
1663 mutex_lock(&dev->hw_dev->dev_lock);
1664
1665 mode.work_mode = dev->isp_mode;
1666 mode.buf_num = ((vdev->module_ens & ISPP_MODULE_TNR_3TO1) ==
1667 ISPP_MODULE_TNR_3TO1) ? 2 : 1;
1668 mode.buf_num += RKISP_BUF_MAX + 2 * (dev->hw_dev->dev_num - 1);
1669 ret = v4l2_subdev_call(ispp_sdev->remote_sd, core, ioctl,
1670 RKISP_ISPP_CMD_SET_MODE, &mode);
1671 if (ret)
1672 goto err;
1673
1674 ret = config_modules(dev);
1675 if (ret) {
1676 rkispp_event_handle(dev, CMD_FREE_POOL, NULL);
1677 mode.work_mode = ISP_ISPP_INIT_FAIL;
1678 v4l2_subdev_call(ispp_sdev->remote_sd, core, ioctl,
1679 RKISP_ISPP_CMD_SET_MODE, &mode);
1680 goto err;
1681 }
1682 if (dev->hw_dev->is_single)
1683 writel(ALL_FORCE_UPD, dev->hw_dev->base_addr + RKISPP_CTRL_UPDATE);
1684 for (i = STREAM_MB; i <= STREAM_S2; i++) {
1685 stream = &vdev->stream[i];
1686 if (stream->streaming)
1687 stream->is_upd = true;
1688 }
1689 if (dev->isp_mode & ISP_ISPP_QUICK)
1690 rkispp_set_bits(dev, RKISPP_CTRL_QUICK, 0, GLB_QUICK_EN);
1691
1692 dev->isr_cnt = 0;
1693 dev->isr_err_cnt = 0;
1694 ret = v4l2_subdev_call(&ispp_sdev->sd, video, s_stream, true);
1695 err:
1696 mutex_unlock(&dev->hw_dev->dev_lock);
1697 return ret;
1698 }
1699
check_to_force_update(struct rkispp_device * dev,u32 mis_val)1700 static void check_to_force_update(struct rkispp_device *dev, u32 mis_val)
1701 {
1702 struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1703 struct rkispp_stream *stream;
1704 u32 i, mask = NR_INT | SHP_INT;
1705 bool is_fec_en = (vdev->module_ens & ISPP_MODULE_FEC);
1706
1707 if (mis_val & TNR_INT)
1708 rkispp_module_work_event(dev, NULL, NULL,
1709 ISPP_MODULE_TNR, true);
1710 if (mis_val & FEC_INT)
1711 rkispp_module_work_event(dev, NULL, NULL,
1712 ISPP_MODULE_FEC, true);
1713
1714 /* wait nr_shp/fec/scl idle */
1715 for (i = STREAM_S0; i <= STREAM_S2; i++) {
1716 stream = &vdev->stream[i];
1717 if (stream->is_upd && !is_fec_en &&
1718 rkispp_read(dev, stream->config->reg.ctrl) & SW_SCL_ENABLE)
1719 mask |= stream->config->frame_end_id;
1720 }
1721
1722 vdev->irq_ends |= (mis_val & mask);
1723 v4l2_dbg(3, rkispp_debug, &dev->v4l2_dev,
1724 "irq_ends:0x%x mask:0x%x\n",
1725 vdev->irq_ends, mask);
1726 if (vdev->irq_ends != mask)
1727 return;
1728 vdev->irq_ends = 0;
1729 rkispp_module_work_event(dev, NULL, NULL,
1730 ISPP_MODULE_NR, true);
1731
1732 for (i = STREAM_MB; i <= STREAM_S2; i++) {
1733 stream = &vdev->stream[i];
1734 if (stream->streaming)
1735 stream->is_upd = true;
1736 }
1737 }
1738
/* v10 stream-ops vtable, installed by rkispp_stream_init_ops_v10() */
static struct rkispp_stream_ops rkispp_stream_ops = {
	.config_modules = config_modules,
	.destroy_buf = rkispp_destroy_buf,
	.fec_work_event = fec_work_event,
	.start_isp = start_isp,
	.check_to_force_update = check_to_force_update,
	.update_mi = update_mi,
	.rkispp_frame_done_early = rkispp_frame_done_early,
	.rkispp_module_work_event = rkispp_module_work_event,
};
1749
rkispp_stream_init_ops_v10(struct rkispp_stream_vdev * stream_vdev)1750 void rkispp_stream_init_ops_v10(struct rkispp_stream_vdev *stream_vdev)
1751 {
1752 stream_vdev->stream_ops = &rkispp_stream_ops;
1753 }
1754