xref: /OK3568_Linux_fs/kernel/drivers/media/platform/rockchip/ispp/stream.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd. */
3 
4 #include <linux/clk.h>
5 #include <linux/delay.h>
6 #include <linux/pm_runtime.h>
7 #include <linux/slab.h>
8 #include <media/v4l2-common.h>
9 #include <media/v4l2-event.h>
10 #include <media/v4l2-fh.h>
11 #include <media/v4l2-ioctl.h>
12 #include <media/v4l2-mc.h>
13 #include <media/v4l2-subdev.h>
14 #include <media/videobuf2-dma-contig.h>
15 #include <media/videobuf2-dma-sg.h>
16 #include <linux/rk-isp1-config.h>
17 #include <uapi/linux/rk-video-format.h>
18 
19 #include "dev.h"
20 #include "regs.h"
21 
22 #define STREAM_IN_REQ_BUFS_MIN 1
23 #define STREAM_OUT_REQ_BUFS_MIN 0
24 
25 /* memory align for mpp */
26 #define RK_MPP_ALIGN 4096
27 
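/*
 * Pixel format tables: input_fmts feeds the input (II) stream config,
 * mb_fmts the MB output path and scl_fmts the three scaler outputs.
 * wr_fmt holds the hardware write-format flag bits for each fourcc.
 */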
28 static const struct capture_fmt input_fmts[] = {
29 	{
30 		.fourcc = V4L2_PIX_FMT_YUYV,
31 		.bpp = { 16 },
32 		.cplanes = 1,
33 		.mplanes = 1,
34 		.wr_fmt = FMT_YC_SWAP | FMT_YUYV | FMT_YUV422,
35 	}, {
36 		.fourcc = V4L2_PIX_FMT_UYVY,
37 		.bpp = { 16 },
38 		.cplanes = 1,
39 		.mplanes = 1,
40 		.wr_fmt = FMT_YUYV | FMT_YUV422,
41 	}, {
42 		.fourcc = V4L2_PIX_FMT_NV16,
43 		.bpp = { 8, 16 },
44 		.cplanes = 2,
45 		.mplanes = 1,
46 		.wr_fmt = FMT_YUV422,
47 	}, {
48 		.fourcc = V4L2_PIX_FMT_NV12,
49 		.bpp = { 8, 16 },
50 		.cplanes = 2,
51 		.mplanes = 1,
52 		.wr_fmt = FMT_YUV420,
53 	}
54 };
55 
56 static const struct capture_fmt mb_fmts[] = {
57 	{
58 		.fourcc = V4L2_PIX_FMT_YUYV,
59 		.bpp = { 16 },
60 		.cplanes = 1,
61 		.mplanes = 1,
62 		.wr_fmt = FMT_YC_SWAP | FMT_YUYV | FMT_YUV422,
63 	}, {
64 		.fourcc = V4L2_PIX_FMT_UYVY,
65 		.bpp = { 16 },
66 		.cplanes = 1,
67 		.mplanes = 1,
68 		.wr_fmt = FMT_YUYV | FMT_YUV422,
69 	}, {
70 		.fourcc = V4L2_PIX_FMT_NV16,
71 		.bpp = { 8, 16 },
72 		.cplanes = 2,
73 		.mplanes = 1,
74 		.wr_fmt = FMT_YUV422,
75 	}, {
76 		.fourcc = V4L2_PIX_FMT_NV12,
77 		.bpp = { 8, 16 },
78 		.cplanes = 2,
79 		.mplanes = 1,
80 		.wr_fmt = FMT_YUV420,
81 	}, {
82 		.fourcc = V4L2_PIX_FMT_FBC2,
83 		.bpp = { 8, 16 },
84 		.cplanes = 2,
85 		.mplanes = 1,
86 		.wr_fmt = FMT_YUV422 | FMT_FBC,
87 	}, {
88 		.fourcc = V4L2_PIX_FMT_FBC0,
89 		.bpp = { 8, 16 },
90 		.cplanes = 2,
91 		.mplanes = 1,
92 		.wr_fmt = FMT_YUV420 | FMT_FBC,
93 	}
94 };
95 
96 static const struct capture_fmt scl_fmts[] = {
97 	{
98 		.fourcc = V4L2_PIX_FMT_NV16,
99 		.bpp = { 8, 16 },
100 		.cplanes = 2,
101 		.mplanes = 1,
102 		.wr_fmt = FMT_YUV422,
103 	}, {
104 		.fourcc = V4L2_PIX_FMT_NV12,
105 		.bpp = { 8, 16 },
106 		.cplanes = 2,
107 		.mplanes = 1,
108 		.wr_fmt = FMT_YUV420,
109 	}, {
110 		.fourcc = V4L2_PIX_FMT_GREY,
111 		.bpp = { 8 },
112 		.cplanes = 1,
113 		.mplanes = 1,
114 		.wr_fmt = FMT_YUV422,
115 	}, {
116 		.fourcc = V4L2_PIX_FMT_YUYV,
117 		.bpp = { 16 },
118 		.cplanes = 1,
119 		.mplanes = 1,
120 		.wr_fmt = FMT_YC_SWAP | FMT_YUYV | FMT_YUV422,
121 	}, {
122 		.fourcc = V4L2_PIX_FMT_UYVY,
123 		.bpp = { 16 },
124 		.cplanes = 1,
125 		.mplanes = 1,
126 		.wr_fmt = FMT_YUYV | FMT_YUV422,
127 	}
128 };
129 
130 static struct stream_config input_config = {
131 	.fmts = input_fmts,
132 	.fmt_size = ARRAY_SIZE(input_fmts),
133 };
134 
135 static struct stream_config mb_config = {
136 	.fmts = mb_fmts,
137 	.fmt_size = ARRAY_SIZE(mb_fmts),
138 };
139 
140 static struct stream_config scl0_config = {
141 	.fmts = scl_fmts,
142 	.fmt_size = ARRAY_SIZE(scl_fmts),
143 	.frame_end_id = SCL0_INT,
144 	.reg = {
145 		.ctrl = RKISPP_SCL0_CTRL,
146 		.factor = RKISPP_SCL0_FACTOR,
147 		.cur_y_base = RKISPP_SCL0_CUR_Y_BASE,
148 		.cur_uv_base = RKISPP_SCL0_CUR_UV_BASE,
149 		.cur_vir_stride = RKISPP_SCL0_CUR_VIR_STRIDE,
150 		.cur_y_base_shd = RKISPP_SCL0_CUR_Y_BASE_SHD,
151 		.cur_uv_base_shd = RKISPP_SCL0_CUR_UV_BASE_SHD,
152 	},
153 };
154 
155 static struct stream_config scl1_config = {
156 	.fmts = scl_fmts,
157 	.fmt_size = ARRAY_SIZE(scl_fmts),
158 	.frame_end_id = SCL1_INT,
159 	.reg = {
160 		.ctrl = RKISPP_SCL1_CTRL,
161 		.factor = RKISPP_SCL1_FACTOR,
162 		.cur_y_base = RKISPP_SCL1_CUR_Y_BASE,
163 		.cur_uv_base = RKISPP_SCL1_CUR_UV_BASE,
164 		.cur_vir_stride = RKISPP_SCL1_CUR_VIR_STRIDE,
165 		.cur_y_base_shd = RKISPP_SCL1_CUR_Y_BASE_SHD,
166 		.cur_uv_base_shd = RKISPP_SCL1_CUR_UV_BASE_SHD,
167 	},
168 };
169 
170 static struct stream_config scl2_config = {
171 	.fmts = scl_fmts,
172 	.fmt_size = ARRAY_SIZE(scl_fmts),
173 	.frame_end_id = SCL2_INT,
174 	.reg = {
175 		.ctrl = RKISPP_SCL2_CTRL,
176 		.factor = RKISPP_SCL2_FACTOR,
177 		.cur_y_base = RKISPP_SCL2_CUR_Y_BASE,
178 		.cur_uv_base = RKISPP_SCL2_CUR_UV_BASE,
179 		.cur_vir_stride = RKISPP_SCL2_CUR_VIR_STRIDE,
180 		.cur_y_base_shd = RKISPP_SCL2_CUR_Y_BASE_SHD,
181 		.cur_uv_base_shd = RKISPP_SCL2_CUR_UV_BASE_SHD,
182 	},
183 };
184 
185 static void set_vir_stride(struct rkispp_stream *stream, u32 val)
186 {
187 	rkispp_write(stream->isppdev, stream->config->reg.cur_vir_stride, val);
188 }
189 
190 static void set_scl_factor(struct rkispp_stream *stream, u32 val)
191 {
192 	rkispp_write(stream->isppdev, stream->config->reg.factor, val);
193 }
194 
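/* horizontal/vertical chroma subsampling factors for the given fourcc */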
195 static int fcc_xysubs(u32 fcc, u32 *xsubs, u32 *ysubs)
196 {
197 	switch (fcc) {
198 	case V4L2_PIX_FMT_GREY:
199 		*xsubs = 1;
200 		*ysubs = 1;
201 		break;
202 	case V4L2_PIX_FMT_NV16:
203 	case V4L2_PIX_FMT_NV61:
204 	case V4L2_PIX_FMT_FBC2:
205 		*xsubs = 2;
206 		*ysubs = 1;
207 		break;
208 	case V4L2_PIX_FMT_NV12:
209 	case V4L2_PIX_FMT_NV21:
210 	case V4L2_PIX_FMT_FBC0:
211 		*xsubs = 2;
212 		*ysubs = 2;
213 		break;
214 	default:
215 		return -EINVAL;
216 	}
217 	return 0;
218 }
219 
220 static const
221 struct capture_fmt *find_fmt(struct rkispp_stream *stream,
222 			     const u32 pixelfmt)
223 {
224 	const struct capture_fmt *fmt;
225 	unsigned int i;
226 
227 	for (i = 0; i < stream->config->fmt_size; i++) {
228 		fmt = &stream->config->fmts[i];
229 		if (fmt->fourcc == pixelfmt)
230 			return fmt;
231 	}
232 	return NULL;
233 }
234 
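/*
 * Worker for the virtual (iqtool) stream: takes finished frames queued by
 * the connected output stream, copies each plane into the virtual stream's
 * own buffer and completes both buffers with vb2_buffer_done().
 */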
235 static void vir_cpy_image(struct work_struct *work)
236 {
237 	struct rkispp_vir_cpy *cpy =
238 		container_of(work, struct rkispp_vir_cpy, work);
239 	struct rkispp_stream *vir = cpy->stream;
240 	struct rkispp_buffer *src_buf = NULL;
241 	unsigned long lock_flags = 0;
242 	u32 i;
243 
244 	v4l2_dbg(1, rkispp_debug, &vir->isppdev->v4l2_dev,
245 		 "%s enter\n", __func__);
246 
247 	vir->streaming = true;
248 	spin_lock_irqsave(&vir->vbq_lock, lock_flags);
249 	if (!list_empty(&cpy->queue)) {
250 		src_buf = list_first_entry(&cpy->queue,
251 				struct rkispp_buffer, queue);
252 		list_del(&src_buf->queue);
253 	}
254 	spin_unlock_irqrestore(&vir->vbq_lock, lock_flags);
255 
256 	while (src_buf || vir->streaming) {
257 		if (vir->stopping || !vir->streaming)
258 			goto end;
259 		if (!src_buf)
260 			wait_for_completion(&cpy->cmpl);
261 
262 		vir->is_end = false;
263 		spin_lock_irqsave(&vir->vbq_lock, lock_flags);
264 		if (!src_buf && !list_empty(&cpy->queue)) {
265 			src_buf = list_first_entry(&cpy->queue,
266 					struct rkispp_buffer, queue);
267 			list_del(&src_buf->queue);
268 		}
269 		if (src_buf && !vir->curr_buf && !list_empty(&vir->buf_queue)) {
270 			vir->curr_buf = list_first_entry(&vir->buf_queue,
271 					struct rkispp_buffer, queue);
272 			list_del(&vir->curr_buf->queue);
273 		}
274 		spin_unlock_irqrestore(&vir->vbq_lock, lock_flags);
275 		if (!vir->curr_buf || !src_buf)
276 			goto end;
277 		for (i = 0; i < vir->out_cap_fmt.mplanes; i++) {
278 			u32 payload_size = vir->out_fmt.plane_fmt[i].sizeimage;
279 			void *src = vb2_plane_vaddr(&src_buf->vb.vb2_buf, i);
280 			void *dst = vb2_plane_vaddr(&vir->curr_buf->vb.vb2_buf, i);
281 
282 			if (!src || !dst)
283 				break;
284 			vb2_set_plane_payload(&vir->curr_buf->vb.vb2_buf, i, payload_size);
285 			memcpy(dst, src, payload_size);
286 		}
287 		vir->curr_buf->vb.sequence = src_buf->vb.sequence;
288 		vir->curr_buf->vb.vb2_buf.timestamp = src_buf->vb.vb2_buf.timestamp;
289 		vb2_buffer_done(&vir->curr_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
290 		vir->curr_buf = NULL;
291 end:
292 		if (src_buf)
293 			vb2_buffer_done(&src_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
294 		src_buf = NULL;
295 		spin_lock_irqsave(&vir->vbq_lock, lock_flags);
296 		if (!list_empty(&cpy->queue)) {
297 			src_buf = list_first_entry(&cpy->queue,
298 					struct rkispp_buffer, queue);
299 			list_del(&src_buf->queue);
300 		} else if (vir->stopping) {
301 			vir->streaming = false;
302 		}
303 		spin_unlock_irqrestore(&vir->vbq_lock, lock_flags);
304 	}
305 
306 	vir->is_end = true;
307 	if (vir->stopping) {
308 		vir->stopping = false;
309 		vir->streaming = false;
310 		wake_up(&vir->done);
311 	}
312 	v4l2_dbg(1, rkispp_debug, &vir->isppdev->v4l2_dev,
313 		 "%s exit\n", __func__);
314 }
315 
316 static void irq_work(struct work_struct *work)
317 {
318 	struct rkispp_device *dev = container_of(work, struct rkispp_device, irq_work);
319 
320 	rkispp_set_clk_rate(dev->hw_dev->clks[0], dev->hw_dev->core_clk_max);
321 	dev->stream_vdev.stream_ops->check_to_force_update(dev, dev->mis_val);
322 	dev->hw_dev->is_first = false;
323 }
324 
325 void get_stream_buf(struct rkispp_stream *stream)
326 {
327 	unsigned long lock_flags = 0;
328 
329 	spin_lock_irqsave(&stream->vbq_lock, lock_flags);
330 	if (!list_empty(&stream->buf_queue) && !stream->curr_buf) {
331 		stream->curr_buf =
332 			list_first_entry(&stream->buf_queue,
333 					 struct rkispp_buffer, queue);
334 		list_del(&stream->curr_buf->queue);
335 	}
336 	spin_unlock_irqrestore(&stream->vbq_lock, lock_flags);
337 }
338 
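/*
 * Frame-end handling: complete the current buffer (payload, sequence,
 * timestamp, optional register dump in the extra plane, hand-off to the
 * virtual stream), then fetch the next buffer and program it via update_mi().
 */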
339 int rkispp_frame_end(struct rkispp_stream *stream, u32 state)
340 {
341 	struct rkispp_device *dev = stream->isppdev;
342 	struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
343 	struct capture_fmt *fmt = &stream->out_cap_fmt;
344 	struct rkisp_ispp_reg *reg_buf = NULL;
345 	unsigned long lock_flags = 0;
346 	int i = 0;
347 
348 	if (state == FRAME_IRQ && dev->stream_vdev.is_done_early)
349 		return 0;
350 
351 	if (stream->curr_buf) {
352 		struct rkispp_stream *vir = &dev->stream_vdev.stream[STREAM_VIR];
353 		u64 ns = dev->ispp_sdev.frame_timestamp;
354 
355 		if (!ns)
356 			ns = ktime_get_ns();
357 
358 		for (i = 0; i < fmt->mplanes; i++) {
359 			u32 payload_size =
360 				stream->out_fmt.plane_fmt[i].sizeimage;
361 			vb2_set_plane_payload(&stream->curr_buf->vb.vb2_buf, i,
362 					      payload_size);
363 		}
364 		stream->curr_buf->vb.sequence = dev->ispp_sdev.frm_sync_seq;
365 		stream->curr_buf->vb.vb2_buf.timestamp = ns;
366 
367 		if (stream->is_reg_withstream &&
368 		    (fmt->wr_fmt & FMT_FBC || fmt->wr_fmt == FMT_YUV420)) {
369 			void *addr = vb2_plane_vaddr(&stream->curr_buf->vb.vb2_buf, i);
370 
371 			rkispp_find_regbuf_by_id(dev, &reg_buf, dev->dev_id,
372 						 stream->curr_buf->vb.sequence);
373 			if (reg_buf) {
374 				u32 cpy_size = offsetof(struct rkisp_ispp_reg, reg);
375 
376 				cpy_size += reg_buf->reg_size;
377 				memcpy(addr, reg_buf, cpy_size);
378 
379 				rkispp_release_regbuf(dev, reg_buf);
380 				vb2_set_plane_payload(&stream->curr_buf->vb.vb2_buf, 1, cpy_size);
381 				v4l2_dbg(3, rkispp_debug, &dev->v4l2_dev,
382 					 "stream(0x%x) write reg buf to last plane\n",
383 					 stream->id);
384 			} else {
385 				v4l2_err(&dev->v4l2_dev,
386 					 "%s can not find reg buf: dev_id %d, sequence %d\n",
387 					 __func__, dev->dev_id, stream->curr_buf->vb.sequence);
388 			}
389 		}
390 
391 		if (vir->streaming && vir->conn_id == stream->id) {
392 			spin_lock_irqsave(&vir->vbq_lock, lock_flags);
393 			if (vir->streaming)
394 				list_add_tail(&stream->curr_buf->queue,
395 					&dev->stream_vdev.vir_cpy.queue);
396 			spin_unlock_irqrestore(&vir->vbq_lock, lock_flags);
397 			if (!completion_done(&dev->stream_vdev.vir_cpy.cmpl))
398 				complete(&dev->stream_vdev.vir_cpy.cmpl);
399 			if (!vir->streaming)
400 				vb2_buffer_done(&stream->curr_buf->vb.vb2_buf,
401 						VB2_BUF_STATE_DONE);
402 		} else {
403 			vb2_buffer_done(&stream->curr_buf->vb.vb2_buf,
404 					VB2_BUF_STATE_DONE);
405 		}
406 		ns = ktime_get_ns();
407 		stream->dbg.interval = ns - stream->dbg.timestamp;
408 		stream->dbg.timestamp = ns;
409 		stream->dbg.delay = ns - stream->curr_buf->vb.vb2_buf.timestamp;
410 		stream->dbg.id = stream->curr_buf->vb.sequence;
411 
412 		stream->curr_buf = NULL;
413 	} else {
414 		u32 frame_id = dev->ispp_sdev.frm_sync_seq;
415 
416 		if (stream->is_cfg) {
417 			stream->dbg.frameloss++;
418 			v4l2_dbg(0, rkispp_debug, &dev->v4l2_dev,
419 				 "stream:%d no buf, lost frame:%d\n",
420 				 stream->id, frame_id);
421 		}
422 
423 		if (stream->is_reg_withstream &&
424 		    (fmt->wr_fmt & FMT_FBC || fmt->wr_fmt == FMT_YUV420)) {
425 			rkispp_find_regbuf_by_id(dev, &reg_buf, dev->dev_id, frame_id);
426 			if (reg_buf) {
427 				rkispp_release_regbuf(dev, reg_buf);
428 				v4l2_info(&dev->v4l2_dev,
429 					  "%s: current frame use dummy buffer(dev_id %d, sequence %d)\n",
430 					  __func__, dev->dev_id, frame_id);
431 			}
432 		}
433 	}
434 
435 	get_stream_buf(stream);
436 	vdev->stream_ops->update_mi(stream);
437 	return 0;
438 }
439 
440 void *get_pool_buf(struct rkispp_device *dev,
441 			  struct rkisp_ispp_buf *dbufs)
442 {
443 	int i;
444 
445 	for (i = 0; i < RKISPP_BUF_POOL_MAX; i++)
446 		if (dev->hw_dev->pool[i].dbufs == dbufs)
447 			return &dev->hw_dev->pool[i];
448 
449 	return NULL;
450 }
451 
452 void *dbuf_to_dummy(struct dma_buf *dbuf,
453 			   struct rkispp_dummy_buffer *pool,
454 			   int num)
455 {
456 	int i;
457 
458 	for (i = 0; i < num; i++) {
459 		if (pool->dbuf == dbuf)
460 			return pool;
461 		pool++;
462 	}
463 
464 	return NULL;
465 }
466 
467 void *get_list_buf(struct list_head *list, bool is_isp_ispp)
468 {
469 	void *buf = NULL;
470 
471 	if (!list_empty(list)) {
472 		if (is_isp_ispp) {
473 			buf = list_first_entry(list,
474 				struct rkisp_ispp_buf, list);
475 			list_del(&((struct rkisp_ispp_buf *)buf)->list);
476 		} else {
477 			buf = list_first_entry(list,
478 				struct rkispp_dummy_buffer, list);
479 			list_del(&((struct rkispp_dummy_buffer *)buf)->list);
480 		}
481 	}
482 	return buf;
483 }
484 
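/*
 * Signal stream start to the params (3A) video device via a V4L2 event and
 * wait up to 1 second for the first parameters to be applied.
 */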
485 void rkispp_start_3a_run(struct rkispp_device *dev)
486 {
487 	struct rkispp_params_vdev *params_vdev;
488 	struct video_device *vdev;
489 	struct v4l2_event ev = {
490 		.type = CIFISP_V4L2_EVENT_STREAM_START,
491 	};
492 	int ret;
493 
494 	if (dev->ispp_ver == ISPP_V10)
495 		params_vdev = &dev->params_vdev[PARAM_VDEV_NR];
496 	else
497 		params_vdev = &dev->params_vdev[PARAM_VDEV_FEC];
498 	if (!params_vdev->is_subs_evt)
499 		return;
500 	vdev = &params_vdev->vnode.vdev;
501 	v4l2_event_queue(vdev, &ev);
502 	ret = wait_event_timeout(dev->sync_onoff,
503 			params_vdev->streamon && !params_vdev->first_params,
504 			msecs_to_jiffies(1000));
505 	if (!ret)
506 		v4l2_warn(&dev->v4l2_dev,
507 			  "waiting on params stream on event timeout\n");
508 	else
509 		v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
510 			 "Waiting for 3A on use %d ms\n", 1000 - ret);
511 }
512 
513 static void rkispp_stop_3a_run(struct rkispp_device *dev)
514 {
515 	struct rkispp_params_vdev *params_vdev;
516 	struct video_device *vdev;
517 	struct v4l2_event ev = {
518 		.type = CIFISP_V4L2_EVENT_STREAM_STOP,
519 	};
520 	int ret;
521 
522 	if (dev->ispp_ver == ISPP_V10)
523 		params_vdev = &dev->params_vdev[PARAM_VDEV_NR];
524 	else
525 		params_vdev = &dev->params_vdev[PARAM_VDEV_FEC];
526 	if (!params_vdev->is_subs_evt)
527 		return;
528 	vdev = &params_vdev->vnode.vdev;
529 	v4l2_event_queue(vdev, &ev);
530 	ret = wait_event_timeout(dev->sync_onoff, !params_vdev->streamon,
531 				 msecs_to_jiffies(1000));
532 	if (!ret)
533 		v4l2_warn(&dev->v4l2_dev,
534 			  "waiting on params stream off event timeout\n");
535 	else
536 		v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
537 			 "Waiting for 3A off use %d ms\n", 1000 - ret);
538 }
539 
540 static int start_ii(struct rkispp_stream *stream)
541 {
542 	struct rkispp_device *dev = stream->isppdev;
543 	struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
544 	unsigned long lock_flags = 0;
545 	struct rkispp_buffer *buf;
546 	int i;
547 
548 	v4l2_subdev_call(&dev->ispp_sdev.sd, video, s_stream, true);
549 	spin_lock_irqsave(&stream->vbq_lock, lock_flags);
550 	while (!list_empty(&stream->buf_queue)) {
551 		buf = list_first_entry(&stream->buf_queue, struct rkispp_buffer, queue);
552 		list_del(&buf->queue);
553 		i = buf->vb.vb2_buf.index;
554 		vdev->input[i].priv = buf;
555 		vdev->input[i].index = dev->dev_id;
556 		vdev->input[i].frame_timestamp = buf->vb.vb2_buf.timestamp;
557 		vdev->input[i].frame_id = ++dev->ispp_sdev.frm_sync_seq;
558 		rkispp_event_handle(dev, CMD_QUEUE_DMABUF, &vdev->input[i]);
559 	}
560 	stream->streaming = true;
561 	spin_unlock_irqrestore(&stream->vbq_lock, lock_flags);
562 	return 0;
563 }
564 
565 static int config_ii(struct rkispp_stream *stream)
566 {
567 	struct rkispp_stream_vdev *stream_vdev = &stream->isppdev->stream_vdev;
568 
569 	stream->is_cfg = true;
570 	rkispp_start_3a_run(stream->isppdev);
571 	return stream_vdev->stream_ops->config_modules(stream->isppdev);
572 }
573 
574 static int is_stopped_ii(struct rkispp_stream *stream)
575 {
576 	stream->streaming = false;
577 	return true;
578 }
579 
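/*
 * Program the DDR write-out of the last enabled module (TNR, NR/SHP or FEC)
 * for the MB stream: write format, YUV range, virtual stride and the first
 * output buffer.
 */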
580 void secure_config_mb(struct rkispp_stream *stream)
581 {
582 	struct rkispp_device *dev = stream->isppdev;
583 	u32 limit_range, mult = 1;
584 
585 	/* enable dma immediately, config in idle state */
586 	switch (stream->last_module) {
587 	case ISPP_MODULE_TNR:
588 		rkispp_set_bits(dev, RKISPP_TNR_CTRL, FMT_WR_MASK,
589 				SW_TNR_1ST_FRM | stream->out_cap_fmt.wr_fmt << 4);
590 		break;
591 	case ISPP_MODULE_NR:
592 	case ISPP_MODULE_SHP:
593 		limit_range = (stream->out_fmt.quantization != V4L2_QUANTIZATION_LIM_RANGE) ?
594 			0 : SW_SHP_WR_YUV_LIMIT;
595 		rkispp_set_bits(dev, RKISPP_SHARP_CTRL,
596 				SW_SHP_WR_YUV_LIMIT | SW_SHP_WR_FORMAT_MASK,
597 				limit_range | stream->out_cap_fmt.wr_fmt);
598 		rkispp_clear_bits(dev, RKISPP_SHARP_CORE_CTRL, SW_SHP_DMA_DIS);
599 		break;
600 	case ISPP_MODULE_FEC:
601 		limit_range = (stream->out_fmt.quantization != V4L2_QUANTIZATION_LIM_RANGE) ?
602 			0 : SW_FEC_WR_YUV_LIMIT;
603 		rkispp_set_bits(dev, RKISPP_FEC_CTRL, SW_FEC_WR_YUV_LIMIT | FMT_WR_MASK,
604 				limit_range | stream->out_cap_fmt.wr_fmt << 4);
605 		rkispp_write(dev, RKISPP_FEC_DST_SIZE,
606 			     stream->out_fmt.height << 16 | stream->out_fmt.width);
607 		rkispp_clear_bits(dev, RKISPP_FEC_CORE_CTRL, SW_FEC2DDR_DIS);
608 		break;
609 	default:
610 		break;
611 	}
612 
613 	if (stream->out_cap_fmt.wr_fmt & FMT_YUYV)
614 		mult = 2;
615 	else if (stream->out_cap_fmt.wr_fmt & FMT_FBC)
616 		mult = 0;
617 	set_vir_stride(stream, ALIGN(stream->out_fmt.width * mult, 16) >> 2);
618 
619 	/* config first buf */
620 	rkispp_frame_end(stream, FRAME_INIT);
621 
622 	stream->is_cfg = true;
623 }
624 
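/*
 * MB path setup: pick the last enabled module in the pipeline (FEC, else
 * NR/SHP, else TNR) and point frame_end_id plus the write address/stride
 * registers at that module's output.
 */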
625 static int config_mb(struct rkispp_stream *stream)
626 {
627 	struct rkispp_device *dev = stream->isppdev;
628 	u32 i;
629 
630 	for (i = ISPP_MODULE_FEC; i > 0; i = i >> 1) {
631 		if (dev->stream_vdev.module_ens & i)
632 			break;
633 	}
634 	if (!i)
635 		return -EINVAL;
636 
637 	stream->last_module = i;
638 	switch (i) {
639 	case ISPP_MODULE_TNR:
640 		stream->config->frame_end_id = TNR_INT;
641 		stream->config->reg.cur_y_base = RKISPP_TNR_WR_Y_BASE;
642 		stream->config->reg.cur_uv_base = RKISPP_TNR_WR_UV_BASE;
643 		stream->config->reg.cur_vir_stride = RKISPP_TNR_WR_VIR_STRIDE;
644 		stream->config->reg.cur_y_base_shd = RKISPP_TNR_WR_Y_BASE_SHD;
645 		stream->config->reg.cur_uv_base_shd = RKISPP_TNR_WR_UV_BASE_SHD;
646 		break;
647 	case ISPP_MODULE_NR:
648 	case ISPP_MODULE_SHP:
649 		stream->config->frame_end_id = SHP_INT;
650 		stream->config->reg.cur_y_base = RKISPP_SHARP_WR_Y_BASE;
651 		stream->config->reg.cur_uv_base = RKISPP_SHARP_WR_UV_BASE;
652 		stream->config->reg.cur_vir_stride = RKISPP_SHARP_WR_VIR_STRIDE;
653 		stream->config->reg.cur_y_base_shd = RKISPP_SHARP_WR_Y_BASE_SHD;
654 		stream->config->reg.cur_uv_base_shd = RKISPP_SHARP_WR_UV_BASE_SHD;
655 		break;
656 	default:
657 		stream->config->frame_end_id = FEC_INT;
658 		stream->config->reg.cur_y_base = RKISPP_FEC_WR_Y_BASE;
659 		stream->config->reg.cur_uv_base = RKISPP_FEC_WR_UV_BASE;
660 		stream->config->reg.cur_vir_stride = RKISPP_FEC_WR_VIR_STRIDE;
661 		stream->config->reg.cur_y_base_shd = RKISPP_FEC_WR_Y_BASE_SHD;
662 		stream->config->reg.cur_uv_base_shd = RKISPP_FEC_WR_UV_BASE_SHD;
663 	}
664 
665 	if (dev->ispp_sdev.state == ISPP_STOP)
666 		secure_config_mb(stream);
667 	v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
668 		 "%s last module:%d\n", __func__, i);
669 	return 0;
670 }
671 
672 static int is_stopped_mb(struct rkispp_stream *stream)
673 {
674 	struct rkispp_device *dev = stream->isppdev;
675 	struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
676 	bool is_stopped = true;
677 	u32 val;
678 
679 	if (vdev->module_ens & ISPP_MODULE_FEC) {
680 		/* close dma write immediately */
681 		rkispp_clear_bits(dev, RKISPP_FEC_CTRL, FMT_FBC << 4);
682 		rkispp_set_bits(dev, RKISPP_FEC_CORE_CTRL,
683 				0, SW_FEC2DDR_DIS);
684 	} else if (vdev->module_ens & (ISPP_MODULE_NR | ISPP_MODULE_SHP)) {
685 		val = dev->hw_dev->dummy_buf.dma_addr;
686 		rkispp_write(dev, RKISPP_SHARP_WR_Y_BASE, val);
687 		rkispp_write(dev, RKISPP_SHARP_WR_UV_BASE, val);
688 		if (dev->inp == INP_ISP)
689 			rkispp_set_bits(dev, RKISPP_SHARP_CTRL, SW_SHP_WR_FORMAT_MASK, FMT_FBC);
690 	}
691 
692 	/* wait for the last frame */
693 	if (atomic_read(&dev->stream_vdev.refcnt) == 1) {
694 		val = readl(dev->hw_dev->base_addr + RKISPP_CTRL_SYS_STATUS);
695 		is_stopped = (val & 0x8f) ? false : true;
696 	}
697 
698 	return is_stopped;
699 }
700 
701 static int limit_check_mb(struct rkispp_stream *stream,
702 			  struct v4l2_pix_format_mplane *try_fmt)
703 {
704 	struct rkispp_device *dev = stream->isppdev;
705 	struct rkispp_subdev *sdev = &dev->ispp_sdev;
706 	u32 *w = try_fmt ? &try_fmt->width : &stream->out_fmt.width;
707 	u32 *h = try_fmt ? &try_fmt->height : &stream->out_fmt.height;
708 
709 	if (*w != sdev->out_fmt.width || *h != sdev->out_fmt.height) {
710 		v4l2_err(&dev->v4l2_dev,
711 			 "output:%dx%d should equal input:%dx%d\n",
712 			 *w, *h, sdev->out_fmt.width, sdev->out_fmt.height);
713 		if (!try_fmt) {
714 			*w = 0;
715 			*h = 0;
716 		}
717 		return -EINVAL;
718 	}
719 
720 	return 0;
721 }
722 
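/*
 * Scaler setup. The scale factor is fixed point:
 *   factor = (out - 1) * 8192 / (in - 1) + 1
 * so an unscaled axis yields 8193, and both axes at 8193 select bypass.
 * GREY output disables the UV write; limited range sets the YUV limit bit.
 */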
723 static int config_scl(struct rkispp_stream *stream)
724 {
725 	struct rkispp_device *dev = stream->isppdev;
726 	const struct capture_fmt *fmt = &stream->out_cap_fmt;
727 	u32 in_width = dev->ispp_sdev.out_fmt.width;
728 	u32 in_height = dev->ispp_sdev.out_fmt.height;
729 	u32 hy_fac = (stream->out_fmt.width - 1) * 8192 /
730 			(in_width - 1) + 1;
731 	u32 vy_fac = (stream->out_fmt.height - 1) * 8192 /
732 			(in_height - 1) + 1;
733 	u32 val = SW_SCL_ENABLE, mult = 1;
734 	u32 mask = SW_SCL_WR_YUV_LIMIT | SW_SCL_WR_YUYV_YCSWAP |
735 		SW_SCL_WR_YUYV_FORMAT | SW_SCL_WR_YUV_FORMAT |
736 		SW_SCL_WR_UV_DIS | SW_SCL_BYPASS;
737 
738 	/* config first buf */
739 	rkispp_frame_end(stream, FRAME_INIT);
740 	if (hy_fac == 8193 && vy_fac == 8193)
741 		val |= SW_SCL_BYPASS;
742 	if (fmt->wr_fmt & FMT_YUYV)
743 		mult = 2;
744 	set_vir_stride(stream, ALIGN(stream->out_fmt.width * mult, 16) >> 2);
745 	set_scl_factor(stream, vy_fac << 16 | hy_fac);
746 	val |= fmt->wr_fmt << 3 |
747 		((fmt->fourcc != V4L2_PIX_FMT_GREY) ? 0 : SW_SCL_WR_UV_DIS) |
748 		((stream->out_fmt.quantization != V4L2_QUANTIZATION_LIM_RANGE) ?
749 		 0 : SW_SCL_WR_YUV_LIMIT);
750 	rkispp_set_bits(dev, stream->config->reg.ctrl, mask, val);
751 	stream->is_cfg = true;
752 
753 	v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
754 		 "scl%d ctrl:0x%x stride:0x%x factor:0x%x\n",
755 		 stream->id - STREAM_S0,
756 		 rkispp_read(dev, stream->config->reg.ctrl),
757 		 rkispp_read(dev, stream->config->reg.cur_vir_stride),
758 		 rkispp_read(dev, stream->config->reg.factor));
759 	return 0;
760 }
761 
762 static void stop_scl(struct rkispp_stream *stream)
763 {
764 	struct rkispp_device *dev = stream->isppdev;
765 
766 	rkispp_clear_bits(dev, stream->config->reg.ctrl, SW_SCL_ENABLE);
767 }
768 
769 static int is_stopped_scl(struct rkispp_stream *stream)
770 {
771 	struct rkispp_device *dev = stream->isppdev;
772 	u32 scl_en, other_en = 0, val = SW_SCL_ENABLE;
773 	bool is_stopped;
774 
775 	if (dev->hw_dev->is_single)
776 		val = SW_SCL_ENABLE_SHD;
777 	scl_en = rkispp_read(dev, stream->config->reg.ctrl) & val;
778 	if (atomic_read(&dev->stream_vdev.refcnt) == 1) {
779 		val = readl(dev->hw_dev->base_addr + RKISPP_CTRL_SYS_STATUS);
780 		other_en = val & 0x8f;
781 	}
782 	is_stopped = (scl_en | other_en) ? false : true;
783 	return is_stopped;
784 }
785 
786 static int limit_check_scl(struct rkispp_stream *stream,
787 			   struct v4l2_pix_format_mplane *try_fmt)
788 {
789 	struct rkispp_device *dev = stream->isppdev;
790 	struct rkispp_subdev *sdev = &dev->ispp_sdev;
791 	u32 max_width = 1280, max_ratio = 8, min_ratio = 2;
792 	u32 *w = try_fmt ? &try_fmt->width : &stream->out_fmt.width;
793 	u32 *h = try_fmt ? &try_fmt->height : &stream->out_fmt.height;
794 	u32 forcc = try_fmt ? try_fmt->pixelformat : stream->out_fmt.pixelformat;
795 	int ret = 0;
796 
797 	/* bypass scale */
798 	if (*w == sdev->out_fmt.width && *h == sdev->out_fmt.height)
799 		return ret;
800 
801 	if (stream->id == STREAM_S0) {
802 		if (*h == sdev->out_fmt.height || (forcc != V4L2_PIX_FMT_NV12))
803 			max_width = 3264;
804 		else
805 			max_width = 2080;
806 		min_ratio = 1;
807 	}
808 
809 	if (*w > max_width ||
810 	    *w * max_ratio < sdev->out_fmt.width ||
811 	    *h * max_ratio < sdev->out_fmt.height ||
812 	    *w * min_ratio > sdev->out_fmt.width ||
813 	    *h * min_ratio > sdev->out_fmt.height) {
814 		v4l2_err(&dev->v4l2_dev,
815 			 "scale%d:%dx%d out of range:\n"
816 			 "\t[width max:%d ratio max:%d min:%d]\n",
817 			 stream->id - STREAM_S0, *w, *h,
818 			 max_width, max_ratio, min_ratio);
819 		if (!try_fmt) {
820 			*w = 0;
821 			*h = 0;
822 		}
823 		ret = -EINVAL;
824 	}
825 
826 	return ret;
827 }
828 
829 static struct streams_ops input_stream_ops = {
830 	.config = config_ii,
831 	.start = start_ii,
832 	.is_stopped = is_stopped_ii,
833 };
834 
835 static struct streams_ops mb_stream_ops = {
836 	.config = config_mb,
837 	.is_stopped = is_stopped_mb,
838 	.limit_check = limit_check_mb,
839 };
840 
841 static struct streams_ops scal_stream_ops = {
842 	.config = config_scl,
843 	.stop = stop_scl,
844 	.is_stopped = is_stopped_scl,
845 	.limit_check = limit_check_scl,
846 };
847 
848 /***************************** vb2 operations*******************************/
849 
850 static int rkispp_queue_setup(struct vb2_queue *queue,
851 			      unsigned int *num_buffers,
852 			      unsigned int *num_planes,
853 			      unsigned int sizes[],
854 			      struct device *alloc_ctxs[])
855 {
856 	struct rkispp_stream *stream = queue->drv_priv;
857 	struct rkispp_device *dev = stream->isppdev;
858 	const struct v4l2_pix_format_mplane *pixm = NULL;
859 	const struct capture_fmt *cap_fmt = NULL;
860 	u32 i;
861 
862 	pixm = &stream->out_fmt;
863 	if (!pixm->width || !pixm->height)
864 		return -EINVAL;
865 	cap_fmt = &stream->out_cap_fmt;
866 	*num_planes = cap_fmt->mplanes;
867 
868 	for (i = 0; i < cap_fmt->mplanes; i++) {
869 		const struct v4l2_plane_pix_format *plane_fmt;
870 
871 		plane_fmt = &pixm->plane_fmt[i];
872 		/* align the height to 16 when allocating memory
873 		 * so that the Rockchip encoder can use the DMA buffer directly
874 		 */
875 		sizes[i] = (stream->type == STREAM_OUTPUT &&
876 			    cap_fmt->wr_fmt != FMT_FBC) ?
877 				plane_fmt->sizeimage / pixm->height *
878 				ALIGN(pixm->height, 16) :
879 				plane_fmt->sizeimage;
880 	}
881 
882 	if (stream->is_reg_withstream &&
883 	    (cap_fmt->wr_fmt & FMT_FBC || cap_fmt->wr_fmt == FMT_YUV420)) {
884 		(*num_planes)++;
885 		sizes[1] = sizeof(struct rkisp_ispp_reg);
886 	}
887 
888 	v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
889 		 "%s stream:%d count %d, size %d\n",
890 		 v4l2_type_names[queue->type],
891 		 stream->id, *num_buffers, sizes[0]);
892 
893 	return 0;
894 }
895 
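/*
 * .buf_queue: resolve each plane's DMA address (sg or contig) and derive
 * the chroma plane offsets for single-memory-plane formats; II buffers
 * queued while streaming are handed to the ISP immediately.
 */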
896 static void rkispp_buf_queue(struct vb2_buffer *vb)
897 {
898 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
899 	struct rkispp_buffer *isppbuf = to_rkispp_buffer(vbuf);
900 	struct vb2_queue *queue = vb->vb2_queue;
901 	struct rkispp_stream *stream = queue->drv_priv;
902 	struct rkispp_device *dev = stream->isppdev;
903 	struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
904 	struct v4l2_pix_format_mplane *pixm = &stream->out_fmt;
905 	struct capture_fmt *cap_fmt = &stream->out_cap_fmt;
906 	unsigned long lock_flags = 0;
907 	u32 height, size, offset;
908 	struct sg_table *sgt;
909 	int i;
910 
911 	memset(isppbuf->buff_addr, 0, sizeof(isppbuf->buff_addr));
912 	for (i = 0; i < cap_fmt->mplanes; i++) {
913 		if (stream->isppdev->hw_dev->is_dma_sg_ops) {
914 			sgt = vb2_dma_sg_plane_desc(vb, i);
915 			isppbuf->buff_addr[i] = sg_dma_address(sgt->sgl);
916 		} else {
917 			isppbuf->buff_addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
918 		}
919 	}
920 	/*
921 	 * NOTE: plane_fmt[0].sizeimage is the total size of all planes for
922 	 * single memory plane formats, so calculate each plane's offset explicitly.
923 	 */
924 	if (cap_fmt->mplanes == 1) {
925 		for (i = 0; i < cap_fmt->cplanes - 1; i++) {
926 			/* FBC mode calculate payload offset */
927 			height = (cap_fmt->wr_fmt & FMT_FBC) ?
928 				ALIGN(pixm->height, 16) >> 4 : pixm->height;
929 			size = (i == 0) ?
930 				pixm->plane_fmt[i].bytesperline * height :
931 				pixm->plane_fmt[i].sizeimage;
932 			offset = (cap_fmt->wr_fmt & FMT_FBC) ?
933 				ALIGN(size, RK_MPP_ALIGN) : size;
934 			if (cap_fmt->wr_fmt & FMT_FBC && dev->ispp_ver == ISPP_V20)
935 				rkispp_write(dev, RKISPP_FEC_FBCE_HEAD_OFFSET,
936 					     offset | SW_OFFSET_ENABLE);
937 
938 			isppbuf->buff_addr[i + 1] =
939 				isppbuf->buff_addr[i] + offset;
940 		}
941 	}
942 
943 	v4l2_dbg(2, rkispp_debug, &stream->isppdev->v4l2_dev,
944 		 "%s stream:%d buf:0x%x\n", __func__,
945 		 stream->id, isppbuf->buff_addr[0]);
946 
947 	spin_lock_irqsave(&stream->vbq_lock, lock_flags);
948 	if (stream->type == STREAM_OUTPUT ||
949 	    (stream->id == STREAM_II && !stream->streaming)) {
950 		list_add_tail(&isppbuf->queue, &stream->buf_queue);
951 	} else {
952 		i = vb->index;
953 		vdev->input[i].priv = isppbuf;
954 		vdev->input[i].index = dev->dev_id;
955 		vdev->input[i].frame_timestamp = vb->timestamp;
956 		vdev->input[i].frame_id = ++dev->ispp_sdev.frm_sync_seq;
957 		rkispp_event_handle(dev, CMD_QUEUE_DMABUF, &vdev->input[i]);
958 	}
959 	spin_unlock_irqrestore(&stream->vbq_lock, lock_flags);
960 }
961 
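/*
 * Stop one stream: the last user also stops the ISP source and 3A, then
 * waits (up to 500ms) for the frame in flight unless the hardware is idle,
 * and finally closes the module's DMA write via the stream ops.
 */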
962 static void rkispp_stream_stop(struct rkispp_stream *stream)
963 {
964 	struct rkispp_device *dev = stream->isppdev;
965 	bool is_wait = true;
966 	int ret = 0;
967 
968 	stream->stopping = true;
969 	if (atomic_read(&dev->stream_vdev.refcnt) == 1) {
970 		v4l2_subdev_call(&dev->ispp_sdev.sd, video, s_stream, false);
971 		rkispp_stop_3a_run(dev);
972 		if (dev->stream_vdev.fec.is_end &&
973 		    (dev->dev_id != dev->hw_dev->cur_dev_id || dev->hw_dev->is_idle))
974 			is_wait = false;
975 	}
976 	if (is_wait) {
977 		ret = wait_event_timeout(stream->done,
978 					 !stream->streaming,
979 					 msecs_to_jiffies(500));
980 		if (!ret)
981 			v4l2_warn(&dev->v4l2_dev,
982 				  "stream:%d stop timeout\n", stream->id);
983 	}
984 	if (stream->ops) {
985 		/* scl stream close dma write */
986 		if (stream->ops->stop)
987 			stream->ops->stop(stream);
988 		else if (stream->ops->is_stopped)
989 			/* mb stream close dma write immediately */
990 			stream->ops->is_stopped(stream);
991 	}
992 	stream->is_upd = false;
993 	stream->streaming = false;
994 	stream->stopping = false;
995 }
996 
997 static void destroy_buf_queue(struct rkispp_stream *stream,
998 			      enum vb2_buffer_state state)
999 {
1000 	struct vb2_queue *queue = &stream->vnode.buf_queue;
1001 	unsigned long lock_flags = 0;
1002 	struct rkispp_buffer *buf;
1003 	u32 i;
1004 
1005 	spin_lock_irqsave(&stream->vbq_lock, lock_flags);
1006 	if (stream->curr_buf) {
1007 		list_add_tail(&stream->curr_buf->queue, &stream->buf_queue);
1008 		stream->curr_buf = NULL;
1009 	}
1010 	while (!list_empty(&stream->buf_queue)) {
1011 		buf = list_first_entry(&stream->buf_queue,
1012 			struct rkispp_buffer, queue);
1013 		list_del(&buf->queue);
1014 		vb2_buffer_done(&buf->vb.vb2_buf, state);
1015 	}
1016 	spin_unlock_irqrestore(&stream->vbq_lock, lock_flags);
1017 
1018 	for (i = 0; i < queue->num_buffers; ++i) {
1019 		if (queue->bufs[i]->state == VB2_BUF_STATE_ACTIVE)
1020 			vb2_buffer_done(queue->bufs[i], VB2_BUF_STATE_ERROR);
1021 	}
1022 }
1023 
1024 static void rkispp_stop_streaming(struct vb2_queue *queue)
1025 {
1026 	struct rkispp_stream *stream = queue->drv_priv;
1027 	struct rkispp_device *dev = stream->isppdev;
1028 	struct rkispp_hw_dev *hw = dev->hw_dev;
1029 	struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1030 
1031 	v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1032 		 "%s id:%d enter\n", __func__, stream->id);
1033 
1034 	if (!stream->streaming)
1035 		return;
1036 
1037 	if (stream->id == STREAM_VIR) {
1038 		stream->stopping = true;
1039 		wait_event_timeout(stream->done,
1040 				   stream->is_end,
1041 				   msecs_to_jiffies(500));
1042 		stream->streaming = false;
1043 		stream->stopping = false;
1044 		destroy_buf_queue(stream, VB2_BUF_STATE_ERROR);
1045 		if (!completion_done(&dev->stream_vdev.vir_cpy.cmpl))
1046 			complete(&dev->stream_vdev.vir_cpy.cmpl);
1047 		return;
1048 	}
1049 
1050 	mutex_lock(&dev->hw_dev->dev_lock);
1051 	rkispp_stream_stop(stream);
1052 	destroy_buf_queue(stream, VB2_BUF_STATE_ERROR);
1053 	vdev->stream_ops->destroy_buf(stream);
1054 	mutex_unlock(&dev->hw_dev->dev_lock);
1055 	rkispp_free_common_dummy_buf(dev);
1056 	atomic_dec(&dev->stream_vdev.refcnt);
1057 
1058 	if (!atomic_read(&hw->refcnt) &&
1059 	    !atomic_read(&dev->stream_vdev.refcnt)) {
1060 		rkispp_set_clk_rate(hw->clks[0], hw->core_clk_min);
1061 		hw->is_idle = true;
1062 		hw->is_first = true;
1063 	}
1064 	v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1065 		 "%s id:%d exit\n", __func__, stream->id);
1066 }
1067 
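/*
 * .start_streaming: the virtual stream only spawns the copy worker; real
 * streams raise the core clock if needed, allocate the shared dummy buffer,
 * configure their module path and then start from DDR or from the ISP.
 */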
1068 static int rkispp_start_streaming(struct vb2_queue *queue,
1069 				  unsigned int count)
1070 {
1071 	struct rkispp_stream *stream = queue->drv_priv;
1072 	struct rkispp_device *dev = stream->isppdev;
1073 	struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1074 	struct rkispp_hw_dev *hw = dev->hw_dev;
1075 	int ret = -1;
1076 
1077 	v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1078 		 "%s id:%d enter\n", __func__, stream->id);
1079 
1080 	if (stream->streaming)
1081 		return -EBUSY;
1082 
1083 	stream->is_end = true;
1084 	if (stream->id == STREAM_VIR) {
1085 		struct rkispp_stream *t = &dev->stream_vdev.stream[stream->conn_id];
1086 
1087 		if (t->streaming) {
1088 			INIT_WORK(&dev->stream_vdev.vir_cpy.work, vir_cpy_image);
1089 			init_completion(&dev->stream_vdev.vir_cpy.cmpl);
1090 			INIT_LIST_HEAD(&dev->stream_vdev.vir_cpy.queue);
1091 			dev->stream_vdev.vir_cpy.stream = stream;
1092 			schedule_work(&dev->stream_vdev.vir_cpy.work);
1093 			ret = 0;
1094 		} else {
1095 			v4l2_err(&dev->v4l2_dev,
1096 				 "no stream enabled for iqtool\n");
1097 			destroy_buf_queue(stream, VB2_BUF_STATE_QUEUED);
1098 			ret = -EINVAL;
1099 		}
1100 		return ret;
1101 	}
1102 
1103 	if (!atomic_read(&hw->refcnt) &&
1104 	    !atomic_read(&dev->stream_vdev.refcnt) &&
1105 	    clk_get_rate(hw->clks[0]) <= hw->core_clk_min &&
1106 	    (dev->inp == INP_DDR || dev->ispp_ver == ISPP_V20)) {
1107 		dev->hw_dev->is_first = false;
1108 		rkispp_set_clk_rate(hw->clks[0], hw->core_clk_max);
1109 	}
1110 
1111 	stream->is_upd = false;
1112 	stream->is_cfg = false;
1113 	atomic_inc(&dev->stream_vdev.refcnt);
1114 	if (!dev->inp || !stream->linked) {
1115 		v4l2_err(&dev->v4l2_dev,
1116 			 "no link or invalid input source\n");
1117 		goto free_buf_queue;
1118 	}
1119 
1120 	ret = rkispp_alloc_common_dummy_buf(stream->isppdev);
1121 	if (ret < 0)
1122 		goto free_buf_queue;
1123 
1124 	if (dev->inp == INP_ISP) {
1125 		if (dev->ispp_ver == ISPP_V10)
1126 			dev->stream_vdev.module_ens |= ISPP_MODULE_NR;
1127 		else if (dev->ispp_ver == ISPP_V20)
1128 			dev->stream_vdev.module_ens = ISPP_MODULE_FEC;
1129 	}
1130 
1131 	if (stream->ops && stream->ops->config) {
1132 		ret = stream->ops->config(stream);
1133 		if (ret < 0)
1134 			goto free_dummy_buf;
1135 	}
1136 
1137 	/* start from ddr */
1138 	if (stream->ops && stream->ops->start)
1139 		stream->ops->start(stream);
1140 
1141 	stream->streaming = true;
1142 
1143 	/* start from isp */
1144 	ret = vdev->stream_ops->start_isp(dev);
1145 	if (ret)
1146 		goto free_dummy_buf;
1147 
1148 	v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1149 		 "%s id:%d exit\n", __func__, stream->id);
1150 	return 0;
1151 free_dummy_buf:
1152 	rkispp_free_common_dummy_buf(stream->isppdev);
1153 free_buf_queue:
1154 	destroy_buf_queue(stream, VB2_BUF_STATE_QUEUED);
1155 	vdev->stream_ops->destroy_buf(stream);
1156 	atomic_dec(&dev->stream_vdev.refcnt);
1157 	stream->streaming = false;
1158 	stream->is_upd = false;
1159 	v4l2_err(&dev->v4l2_dev, "%s id:%d failed ret:%d\n",
1160 		 __func__, stream->id, ret);
1161 	return ret;
1162 }
1163 
1164 static struct vb2_ops stream_vb2_ops = {
1165 	.queue_setup = rkispp_queue_setup,
1166 	.buf_queue = rkispp_buf_queue,
1167 	.wait_prepare = vb2_ops_wait_prepare,
1168 	.wait_finish = vb2_ops_wait_finish,
1169 	.stop_streaming = rkispp_stop_streaming,
1170 	.start_streaming = rkispp_start_streaming,
1171 };
1172 
1173 static int rkispp_init_vb2_queue(struct vb2_queue *q,
1174 				 struct rkispp_stream *stream,
1175 				 enum v4l2_buf_type buf_type)
1176 {
1177 	q->type = buf_type;
1178 	q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_USERPTR;
1179 	q->drv_priv = stream;
1180 	q->ops = &stream_vb2_ops;
1181 	q->mem_ops = stream->isppdev->hw_dev->mem_ops;
1182 	q->buf_struct_size = sizeof(struct rkispp_buffer);
1183 	if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
1184 		q->min_buffers_needed = STREAM_IN_REQ_BUFS_MIN;
1185 		q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
1186 	} else {
1187 		q->min_buffers_needed = STREAM_OUT_REQ_BUFS_MIN;
1188 		q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1189 	}
1190 	q->lock = &stream->isppdev->apilock;
1191 	q->dev = stream->isppdev->hw_dev->dev;
1192 	q->allow_cache_hints = 1;
1193 	q->bidirectional = 1;
1194 	if (stream->isppdev->hw_dev->is_dma_contig)
1195 		q->dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
1196 	q->gfp_flags = GFP_DMA32;
1197 	return vb2_queue_init(q);
1198 }
1199 
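/*
 * Validate and apply a format: look up the capture_fmt, compute
 * bytesperline/sizeimage per plane (with FBC header/payload handling),
 * optionally add the register-dump plane and run the per-stream limit check.
 */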
1200 static int rkispp_set_fmt(struct rkispp_stream *stream,
1201 			  struct v4l2_pix_format_mplane *pixm,
1202 			  bool try)
1203 {
1204 	struct rkispp_device *dev = stream->isppdev;
1205 	struct rkispp_subdev *sdev = &dev->ispp_sdev;
1206 	const struct capture_fmt *fmt;
1207 	unsigned int imagsize = 0;
1208 	unsigned int planes;
1209 	u32 xsubs = 1, ysubs = 1;
1210 	unsigned int i;
1211 
1212 	if (stream->id == STREAM_VIR) {
1213 		for (i = STREAM_MB; i <= STREAM_S2; i++) {
1214 			struct rkispp_stream *t = &dev->stream_vdev.stream[i];
1215 
1216 			if (t->out_cap_fmt.wr_fmt & FMT_FBC || !t->streaming)
1217 				continue;
1218 			if (t->out_fmt.plane_fmt[0].sizeimage > imagsize) {
1219 				imagsize = t->out_fmt.plane_fmt[0].sizeimage;
1220 				*pixm = t->out_fmt;
1221 				stream->conn_id = t->id;
1222 			}
1223 		}
1224 		if (!imagsize) {
1225 			v4l2_err(&dev->v4l2_dev, "no output stream for iqtool\n");
1226 			return -EINVAL;
1227 		}
1228 		imagsize = 0;
1229 	}
1230 
1231 	fmt = find_fmt(stream, pixm->pixelformat);
1232 	if (!fmt) {
1233 		v4l2_err(&dev->v4l2_dev,
1234 			 "unsupported pixelformat:%c%c%c%c\n",
1235 			 pixm->pixelformat,
1236 			 pixm->pixelformat >> 8,
1237 			 pixm->pixelformat >> 16,
1238 			 pixm->pixelformat >> 24);
1239 		return -EINVAL;
1240 	}
1241 
1242 	pixm->num_planes = fmt->mplanes;
1243 	pixm->field = V4L2_FIELD_NONE;
1244 	if (!pixm->quantization)
1245 		pixm->quantization = V4L2_QUANTIZATION_FULL_RANGE;
1246 
1247 	/* calculate size */
1248 	fcc_xysubs(fmt->fourcc, &xsubs, &ysubs);
1249 	planes = fmt->cplanes ? fmt->cplanes : fmt->mplanes;
1250 	for (i = 0; i < planes; i++) {
1251 		struct v4l2_plane_pix_format *plane_fmt;
1252 		unsigned int width, height, bytesperline, w, h;
1253 
1254 		plane_fmt = pixm->plane_fmt + i;
1255 
1256 		if (pixm->width == RKISPP_MAX_WIDTH_V20) {
1257 			w = ALIGN(pixm->width, 16);
1258 			h = ALIGN(pixm->height, 16);
1259 		} else {
1260 			w = (fmt->wr_fmt & FMT_FBC) ?
1261 				ALIGN(pixm->width, 16) : pixm->width;
1262 			h = (fmt->wr_fmt & FMT_FBC) ?
1263 				ALIGN(pixm->height, 16) : pixm->height;
1264 		}
1265 
1266 		width = i ? w / xsubs : w;
1267 		height = i ? h / ysubs : h;
1268 
1269 		bytesperline = width * DIV_ROUND_UP(fmt->bpp[i], 8);
1270 
1271 		if (i != 0 || plane_fmt->bytesperline < bytesperline)
1272 			plane_fmt->bytesperline = bytesperline;
1273 
1274 		plane_fmt->sizeimage = plane_fmt->bytesperline * height;
1275 		/* FBC header: width * height / 16, aligned to 4096 for mpp
1276 		 * FBC payload: yuv420 or yuv422 size
1277 		 * FBC width and height must be 16-aligned
1278 		 */
1279 		if (fmt->wr_fmt & FMT_FBC && i == 0)
1280 			plane_fmt->sizeimage =
1281 				ALIGN(plane_fmt->sizeimage >> 4, RK_MPP_ALIGN);
1282 		else if (fmt->wr_fmt & FMT_FBC)
1283 			plane_fmt->sizeimage += w * h;
1284 		imagsize += plane_fmt->sizeimage;
1285 	}
1286 
1287 	if (fmt->mplanes == 1)
1288 		pixm->plane_fmt[0].sizeimage = imagsize;
1289 
1290 	stream->is_reg_withstream = rkispp_is_reg_withstream_local(&stream->vnode.vdev.dev);
1291 	if (stream->is_reg_withstream &&
1292 	    (fmt->wr_fmt & FMT_FBC || fmt->wr_fmt == FMT_YUV420))
1293 		pixm->num_planes++;
1294 
1295 	if (!try) {
1296 		stream->out_cap_fmt = *fmt;
1297 		stream->out_fmt = *pixm;
1298 
1299 		if (stream->id == STREAM_II && stream->linked) {
1300 			sdev->in_fmt.width = pixm->width;
1301 			sdev->in_fmt.height = pixm->height;
1302 			sdev->out_fmt.width = pixm->width;
1303 			sdev->out_fmt.height = pixm->height;
1304 		}
1305 		v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1306 			 "%s: stream: %d req(%d, %d) out(%d, %d)\n",
1307 			 __func__, stream->id, pixm->width, pixm->height,
1308 			 stream->out_fmt.width, stream->out_fmt.height);
1309 
1310 		if (dev->ispp_ver == ISPP_V10) {
1311 			if (sdev->out_fmt.width > RKISPP_MAX_WIDTH_V10 ||
1312 			sdev->out_fmt.height > RKISPP_MAX_HEIGHT_V10 ||
1313 			sdev->out_fmt.width < RKISPP_MIN_WIDTH_V10 ||
1314 			sdev->out_fmt.height < RKISPP_MIN_HEIGHT_V10) {
1315 				v4l2_err(&dev->v4l2_dev,
1316 					"ispp input min:%dx%d max:%dx%d\n",
1317 					RKISPP_MIN_WIDTH_V10, RKISPP_MIN_HEIGHT_V10,
1318 					RKISPP_MAX_WIDTH_V10, RKISPP_MAX_HEIGHT_V10);
1319 				stream->out_fmt.width = 0;
1320 				stream->out_fmt.height = 0;
1321 				return -EINVAL;
1322 			}
1323 		} else if (dev->ispp_ver == ISPP_V20) {
1324 			if (sdev->out_fmt.width > RKISPP_MAX_WIDTH_V20 ||
1325 			sdev->out_fmt.height > RKISPP_MAX_HEIGHT_V20 ||
1326 			sdev->out_fmt.width < RKISPP_MIN_WIDTH_V20 ||
1327 			sdev->out_fmt.height < RKISPP_MIN_HEIGHT_V20) {
1328 				v4l2_err(&dev->v4l2_dev,
1329 					"ispp input min:%dx%d max:%dx%d\n",
1330 					RKISPP_MIN_WIDTH_V20, RKISPP_MIN_HEIGHT_V20,
1331 					RKISPP_MAX_WIDTH_V20, RKISPP_MAX_HEIGHT_V20);
1332 				stream->out_fmt.width = 0;
1333 				stream->out_fmt.height = 0;
1334 				return -EINVAL;
1335 			}
1336 		}
1337 	}
1338 
1339 	if (stream->ops && stream->ops->limit_check)
1340 		return stream->ops->limit_check(stream, try ? pixm : NULL);
1341 
1342 	return 0;
1343 }
1344 
1345 /************************* v4l2_file_operations***************************/
1346 
1347 static int rkispp_fh_open(struct file *filp)
1348 {
1349 	struct rkispp_stream *stream = video_drvdata(filp);
1350 	struct rkispp_device *isppdev = stream->isppdev;
1351 	int ret;
1352 
1353 	ret = v4l2_fh_open(filp);
1354 	if (!ret) {
1355 		ret = v4l2_pipeline_pm_get(&stream->vnode.vdev.entity);
1356 		if (ret < 0) {
1357 			v4l2_err(&isppdev->v4l2_dev,
1358 				 "pipeline power on failed %d\n", ret);
1359 			vb2_fop_release(filp);
1360 		}
1361 	}
1362 	return ret;
1363 }
1364 
1365 static int rkispp_fh_release(struct file *filp)
1366 {
1367 	struct rkispp_stream *stream = video_drvdata(filp);
1368 	int ret;
1369 
1370 	ret = vb2_fop_release(filp);
1371 	if (!ret)
1372 		v4l2_pipeline_pm_put(&stream->vnode.vdev.entity);
1373 	return ret;
1374 }
1375 
1376 static const struct v4l2_file_operations rkispp_fops = {
1377 	.open = rkispp_fh_open,
1378 	.release = rkispp_fh_release,
1379 	.unlocked_ioctl = video_ioctl2,
1380 	.poll = vb2_fop_poll,
1381 	.mmap = vb2_fop_mmap,
1382 };
1383 
1384 static int rkispp_enum_input(struct file *file, void *priv,
1385 			struct v4l2_input *input)
1386 {
1387 	if (input->index > 0)
1388 		return -EINVAL;
1389 
1390 	input->type = V4L2_INPUT_TYPE_CAMERA;
1391 	strscpy(input->name, "Camera", sizeof(input->name));
1392 
1393 	return 0;
1394 }
1395 
1396 static int rkispp_try_fmt_vid_mplane(struct file *file, void *fh,
1397 					 struct v4l2_format *f)
1398 {
1399 	struct rkispp_stream *stream = video_drvdata(file);
1400 
1401 	return rkispp_set_fmt(stream, &f->fmt.pix_mp, true);
1402 }
1403 
1404 static int rkispp_enum_fmt_vid_mplane(struct file *file, void *priv,
1405 				      struct v4l2_fmtdesc *f)
1406 {
1407 	struct rkispp_stream *stream = video_drvdata(file);
1408 	const struct capture_fmt *fmt = NULL;
1409 
1410 	if (f->index >= stream->config->fmt_size)
1411 		return -EINVAL;
1412 
1413 	fmt = &stream->config->fmts[f->index];
1414 	f->pixelformat = fmt->fourcc;
1415 	switch (f->pixelformat) {
1416 	case V4L2_PIX_FMT_FBC2:
1417 		strscpy(f->description,
1418 			"Rockchip yuv422sp fbc encoder",
1419 			sizeof(f->description));
1420 		break;
1421 	case V4L2_PIX_FMT_FBC0:
1422 		strscpy(f->description,
1423 			"Rockchip yuv420sp fbc encoder",
1424 			sizeof(f->description));
1425 		break;
1426 	default:
1427 		break;
1428 	}
1429 	return 0;
1430 }
1431 
1432 static int rkispp_s_fmt_vid_mplane(struct file *file,
1433 				       void *priv, struct v4l2_format *f)
1434 {
1435 	struct rkispp_stream *stream = video_drvdata(file);
1436 	struct video_device *vdev = &stream->vnode.vdev;
1437 	struct rkispp_vdev_node *node = vdev_to_node(vdev);
1438 	struct rkispp_device *dev = stream->isppdev;
1439 
1440 	/* Change not allowed if queue is streaming. */
1441 	if (vb2_is_streaming(&node->buf_queue)) {
1442 		v4l2_err(&dev->v4l2_dev, "%s queue busy\n", __func__);
1443 		return -EBUSY;
1444 	}
1445 
1446 	return rkispp_set_fmt(stream, &f->fmt.pix_mp, false);
1447 }
1448 
1449 static int rkispp_g_fmt_vid_mplane(struct file *file, void *fh,
1450 				       struct v4l2_format *f)
1451 {
1452 	struct rkispp_stream *stream = video_drvdata(file);
1453 
1454 	f->fmt.pix_mp = stream->out_fmt;
1455 
1456 	return 0;
1457 }
1458 
1459 static int rkispp_querycap(struct file *file, void *priv,
1460 			   struct v4l2_capability *cap)
1461 {
1462 	struct rkispp_stream *stream = video_drvdata(file);
1463 	struct device *dev = stream->isppdev->dev;
1464 	struct video_device *vdev = video_devdata(file);
1465 
1466 	strlcpy(cap->card, vdev->name, sizeof(cap->card));
1467 	snprintf(cap->driver, sizeof(cap->driver),
1468 		 "%s_v%d", dev->driver->name,
1469 		 stream->isppdev->ispp_ver >> 4);
1470 	snprintf(cap->bus_info, sizeof(cap->bus_info),
1471 		 "platform:%s", dev_name(dev));
1472 
1473 	return 0;
1474 }
1475 
1476 static const struct v4l2_ioctl_ops rkispp_v4l2_ioctl_ops = {
1477 	.vidioc_reqbufs = vb2_ioctl_reqbufs,
1478 	.vidioc_querybuf = vb2_ioctl_querybuf,
1479 	.vidioc_create_bufs = vb2_ioctl_create_bufs,
1480 	.vidioc_qbuf = vb2_ioctl_qbuf,
1481 	.vidioc_expbuf = vb2_ioctl_expbuf,
1482 	.vidioc_dqbuf = vb2_ioctl_dqbuf,
1483 	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
1484 	.vidioc_streamon = vb2_ioctl_streamon,
1485 	.vidioc_streamoff = vb2_ioctl_streamoff,
1486 	.vidioc_enum_input = rkispp_enum_input,
1487 	.vidioc_try_fmt_vid_cap_mplane = rkispp_try_fmt_vid_mplane,
1488 	.vidioc_enum_fmt_vid_cap = rkispp_enum_fmt_vid_mplane,
1489 	.vidioc_s_fmt_vid_cap_mplane = rkispp_s_fmt_vid_mplane,
1490 	.vidioc_g_fmt_vid_cap_mplane = rkispp_g_fmt_vid_mplane,
1491 	.vidioc_try_fmt_vid_out_mplane = rkispp_try_fmt_vid_mplane,
1492 	.vidioc_s_fmt_vid_out_mplane = rkispp_s_fmt_vid_mplane,
1493 	.vidioc_g_fmt_vid_out_mplane = rkispp_g_fmt_vid_mplane,
1494 	.vidioc_querycap = rkispp_querycap,
1495 };
1496 
1497 static void rkispp_unregister_stream_video(struct rkispp_stream *stream)
1498 {
1499 	media_entity_cleanup(&stream->vnode.vdev.entity);
1500 	video_unregister_device(&stream->vnode.vdev);
1501 }
1502 
1503 static int rkispp_register_stream_video(struct rkispp_stream *stream)
1504 {
1505 	struct rkispp_device *dev = stream->isppdev;
1506 	struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
1507 	struct video_device *vdev = &stream->vnode.vdev;
1508 	struct rkispp_vdev_node *node;
1509 	enum v4l2_buf_type buf_type;
1510 	int ret = 0;
1511 
1512 	node = vdev_to_node(vdev);
1513 	vdev->release = video_device_release_empty;
1514 	vdev->fops = &rkispp_fops;
1515 	vdev->minor = -1;
1516 	vdev->v4l2_dev = v4l2_dev;
1517 	vdev->lock = &dev->apilock;
1518 	video_set_drvdata(vdev, stream);
1519 
1520 	vdev->ioctl_ops = &rkispp_v4l2_ioctl_ops;
1521 	if (stream->type == STREAM_INPUT) {
1522 		vdev->device_caps = V4L2_CAP_STREAMING |
1523 			V4L2_CAP_VIDEO_OUTPUT_MPLANE;
1524 		vdev->vfl_dir = VFL_DIR_TX;
1525 		node->pad.flags = MEDIA_PAD_FL_SOURCE;
1526 		buf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
1527 	} else {
1528 		vdev->device_caps = V4L2_CAP_STREAMING |
1529 			V4L2_CAP_VIDEO_CAPTURE_MPLANE;
1530 		vdev->vfl_dir = VFL_DIR_RX;
1531 		node->pad.flags = MEDIA_PAD_FL_SINK;
1532 		buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1533 	}
1534 
1535 	rkispp_init_vb2_queue(&node->buf_queue, stream, buf_type);
1536 	vdev->queue = &node->buf_queue;
1537 
1538 	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
1539 	if (ret < 0) {
1540 		v4l2_err(v4l2_dev,
1541 			 "video register failed with error %d\n", ret);
1542 		return ret;
1543 	}
1544 
1545 	ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
1546 	if (ret < 0)
1547 		goto unreg;
1548 	return 0;
1549 unreg:
1550 	video_unregister_device(vdev);
1551 	return ret;
1552 }
1553 
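/*
 * Debug helper used on monitored restarts: dump the ISPP register window
 * and the relevant TNR input/IIR buffers to files under rkispp_dump_path.
 */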
1554 static void dump_file(struct rkispp_device *dev, u32 restart_module)
1555 {
1556 	struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1557 	void __iomem *base = dev->hw_dev->base_addr;
1558 	struct rkispp_isp_buf_pool *buf;
1559 	struct rkispp_dummy_buffer *dummy;
1560 	struct file *fp = NULL;
1561 	char file[160], reg[48];
1562 	int i;
1563 
1564 	snprintf(file, sizeof(file), "%s/%s%d.reg",
1565 		 rkispp_dump_path, DRIVER_NAME, dev->dev_id);
1566 	fp = filp_open(file, O_RDWR | O_CREAT, 0644);
1567 	if (IS_ERR(fp)) {
1568 		v4l2_err(&dev->v4l2_dev, "%s open %s fail\n", __func__, file);
1569 		return;
1570 	}
1571 	for (i = 0; i < 0x1000; i += 16) {
1572 		snprintf(reg, sizeof(reg), "ffb6%04x:  %08x %08x %08x %08x\n",
1573 			 i, readl(base + i), readl(base + i + 4),
1574 			 readl(base + i + 8), readl(base + i + 12));
1575 		kernel_write(fp, reg, strlen(reg), &fp->f_pos);
1576 	}
1577 	filp_close(fp, NULL);
1578 
1579 	if (restart_module & MONITOR_TNR) {
1580 		if (vdev->tnr.cur_rd) {
1581 			snprintf(file, sizeof(file), "%s/%s%d_tnr_cur.fbc",
1582 				 rkispp_dump_path, DRIVER_NAME, dev->dev_id);
1583 			fp = filp_open(file, O_RDWR | O_CREAT, 0644);
1584 			if (IS_ERR(fp)) {
1585 				v4l2_err(&dev->v4l2_dev,
1586 					 "%s open %s fail\n", __func__, file);
1587 				return;
1588 			}
1589 			buf = get_pool_buf(dev, vdev->tnr.cur_rd);
1590 			kernel_write(fp, buf->vaddr[0], vdev->tnr.cur_rd->dbuf[0]->size, &fp->f_pos);
1591 			filp_close(fp, NULL);
1592 			v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1593 				 "dump tnr cur_rd dma:%pad vaddr:%p\n",
1594 				 &buf->dma[0], buf->vaddr[0]);
1595 		}
1596 
1597 		if (vdev->tnr.nxt_rd && vdev->tnr.nxt_rd != vdev->tnr.cur_rd) {
1598 			snprintf(file, sizeof(file), "%s/%s%d_tnr_nxt.fbc",
1599 				 rkispp_dump_path, DRIVER_NAME, dev->dev_id);
1600 			fp = filp_open(file, O_RDWR | O_CREAT, 0644);
1601 			if (IS_ERR(fp)) {
1602 				v4l2_err(&dev->v4l2_dev,
1603 					 "%s open %s fail\n", __func__, file);
1604 				return;
1605 			}
1606 			buf = get_pool_buf(dev, vdev->tnr.nxt_rd);
1607 			kernel_write(fp, buf->vaddr[0], vdev->tnr.nxt_rd->dbuf[0]->size, &fp->f_pos);
1608 			filp_close(fp, NULL);
1609 			v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1610 				 "dump tnr nxt_rd dma:%pad vaddr:%p\n",
1611 				 &buf->dma[0], buf->vaddr[0]);
1612 		}
1613 	}
1614 
1615 	if (!(restart_module & MONITOR_FEC)) {
1616 		for (i = 0; i < RKISPP_BUF_MAX; i++) {
1617 			dummy = &vdev->tnr.buf.wr[i][0];
1618 			if (!dummy->mem_priv)
1619 				break;
1620 			snprintf(file, sizeof(file), "%s/%s%d_iir%d.fbc",
1621 				 rkispp_dump_path, DRIVER_NAME, dev->dev_id, i);
1622 			fp = filp_open(file, O_RDWR | O_CREAT, 0644);
1623 			if (IS_ERR(fp)) {
1624 				v4l2_err(&dev->v4l2_dev,
1625 					 "%s open %s fail\n", __func__, file);
1626 				return;
1627 			}
1628 			kernel_write(fp, dummy->vaddr, dummy->size, &fp->f_pos);
1629 			filp_close(fp, NULL);
1630 			v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1631 				 "dump tnr wr dma:%pad vaddr:%p\n",
1632 				 &dummy->dma_addr, dummy->vaddr);
1633 		}
1634 	}
1635 }
1636 
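/*
 * Recover a hung module: optionally dump state, soft-reset the hardware,
 * restore the registers and restart the modules recorded in restart_module,
 * giving up after three retries or once the subdev has stopped.
 */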
1637 static void restart_module(struct rkispp_device *dev)
1638 {
1639 	struct rkispp_monitor *monitor = &dev->stream_vdev.monitor;
1640 	void __iomem *base = dev->hw_dev->base_addr;
1641 	u32 val = 0;
1642 
1643 	monitor->retry++;
1644 	v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1645 		 "%s enter\n", __func__);
1646 	if (dev->ispp_sdev.state == ISPP_STOP || monitor->retry > 3) {
1647 		monitor->is_restart = false;
1648 		monitor->is_en = false;
1649 		goto end;
1650 	}
1651 	if (monitor->monitoring_module)
1652 		wait_for_completion_timeout(&monitor->cmpl,
1653 					    msecs_to_jiffies(500));
1654 	if (dev->ispp_sdev.state == ISPP_STOP) {
1655 		monitor->is_restart = false;
1656 		monitor->is_en = false;
1657 		goto end;
1658 	}
1659 
1660 	if (rkispp_dump_path[0] == '/')
1661 		dump_file(dev, monitor->restart_module);
1662 
1663 	if (monitor->restart_module & MONITOR_TNR && monitor->tnr.is_err) {
1664 		rkispp_set_bits(dev, RKISPP_TNR_CTRL, 0, SW_TNR_1ST_FRM);
1665 		monitor->tnr.is_err = false;
1666 	}
1667 	rkispp_soft_reset(dev->hw_dev);
1668 	rkispp_update_regs(dev, RKISPP_CTRL_QUICK, RKISPP_FEC_CROP);
1669 	writel(ALL_FORCE_UPD, base + RKISPP_CTRL_UPDATE);
1670 	if (monitor->restart_module & MONITOR_TNR) {
1671 		val |= TNR_ST;
1672 		rkispp_write(dev, RKISPP_TNR_IIR_Y_BASE,
1673 			     rkispp_read(dev, RKISPP_TNR_WR_Y_BASE));
1674 		rkispp_write(dev, RKISPP_TNR_IIR_UV_BASE,
1675 			     rkispp_read(dev, RKISPP_TNR_WR_UV_BASE));
1676 		monitor->monitoring_module |= MONITOR_TNR;
1677 		if (!completion_done(&monitor->tnr.cmpl))
1678 			complete(&monitor->tnr.cmpl);
1679 	}
1680 	if (monitor->restart_module & MONITOR_NR) {
1681 		if (monitor->nr.is_err) {
1682 			struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1683 			struct v4l2_subdev *sd = dev->ispp_sdev.remote_sd;
1684 			struct rkispp_buffer *inbuf;
1685 
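			/* return the frame NR was reading to its owner */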
1686 			if (vdev->nr.cur_rd) {
1687 				if (vdev->nr.cur_rd->is_isp) {
1688 					v4l2_subdev_call(sd, video, s_rx_buffer,
1689 							 vdev->nr.cur_rd, NULL);
1690 				} else if (!vdev->nr.cur_rd->priv) {
1691 					list_add_tail(&vdev->nr.cur_rd->list,
1692 						      &vdev->tnr.list_wr);
1693 				} else {
1694 					inbuf = vdev->nr.cur_rd->priv;
1695 					vb2_buffer_done(&inbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
1696 				}
1697 				vdev->nr.cur_rd = NULL;
1698 			}
1699 			rkispp_set_bits(dev, RKISPP_TNR_CTRL, 0, SW_TNR_1ST_FRM);
1700 			vdev->nr.is_end = true;
1701 			monitor->nr.is_err = false;
1702 			monitor->is_restart = false;
1703 			monitor->restart_module = 0;
1704 			rkispp_event_handle(dev, CMD_QUEUE_DMABUF, NULL);
1705 			goto end;
1706 		}
1707 		val |= NR_SHP_ST;
1708 		monitor->monitoring_module |= MONITOR_NR;
1709 		if (!completion_done(&monitor->nr.cmpl))
1710 			complete(&monitor->nr.cmpl);
1711 	}
1712 	if (monitor->restart_module & MONITOR_FEC) {
1713 		val |= FEC_ST;
1714 		monitor->monitoring_module |= MONITOR_FEC;
1715 		if (!completion_done(&monitor->fec.cmpl))
1716 			complete(&monitor->fec.cmpl);
1717 	}
1718 	if (!dev->hw_dev->is_shutdown)
1719 		writel(val, base + RKISPP_CTRL_STRT);
1720 	monitor->is_restart = false;
1721 	monitor->restart_module = 0;
1722 end:
1723 	v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1724 		 "%s exit en:%d cnt:%d, monitoring:0x%x\n", __func__,
1725 		 monitor->is_en, monitor->retry, monitor->monitoring_module);
1726 }
1727 
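/*
 * restart_monitor - per-module watchdog work for TNR/NR/FEC
 *
 * While monitoring is enabled, waits on the module completion with a
 * timeout. If the module is still marked as monitored when the wait times
 * out, it is flagged for restart and the first worker to notice the stall
 * calls restart_module(); for TNR the IIR shadow addresses are preserved
 * beforehand.
 */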
1728 static void restart_monitor(struct work_struct *work)
1729 {
1730 	struct module_monitor *m_monitor =
1731 		container_of(work, struct module_monitor, work);
1732 	struct rkispp_device *dev = m_monitor->dev;
1733 	struct rkispp_monitor *monitor = &dev->stream_vdev.monitor;
1734 	unsigned long lock_flags = 0;
1735 	long time;
1736 	int ret;
1737 
1738 	v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1739 		 "%s module:0x%x enter\n", __func__, m_monitor->module);
1740 	while (monitor->is_en) {
1741 		/* max timeout for module idle */
1742 		time = MAX_SCHEDULE_TIMEOUT;
1743 		if (monitor->monitoring_module & m_monitor->module)
1744 			time = (m_monitor->time <= 0 ? 300 : m_monitor->time) + 150;
1745 		ret = wait_for_completion_timeout(&m_monitor->cmpl,
1746 						  msecs_to_jiffies(time));
1747 		if (dev->hw_dev->is_shutdown || dev->ispp_sdev.state == ISPP_STOP)
1748 			break;
1749 		if (!(monitor->monitoring_module & m_monitor->module) ||
1750 		    ret || !monitor->is_en)
1751 			continue;
1752 		v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1753 			 "module:0x%x wait %ldms timeout ret:%d, monitoring:0x%x\n",
1754 			 m_monitor->module, time, ret, monitor->monitoring_module);
1755 
1756 		spin_lock_irqsave(&monitor->lock, lock_flags);
1757 		monitor->monitoring_module &= ~m_monitor->module;
1758 		monitor->restart_module |= m_monitor->module;
1759 		if (monitor->is_restart)
1760 			ret = true;
1761 		else
1762 			monitor->is_restart = true;
1763 		if (m_monitor->module == MONITOR_TNR) {
1764 			rkispp_write(dev, RKISPP_TNR_IIR_Y_BASE,
1765 				     readl(dev->hw_dev->base_addr + RKISPP_TNR_IIR_Y_BASE_SHD));
1766 			rkispp_write(dev, RKISPP_TNR_IIR_UV_BASE,
1767 				     readl(dev->hw_dev->base_addr + RKISPP_TNR_IIR_UV_BASE_SHD));
1768 		}
1769 		spin_unlock_irqrestore(&monitor->lock, lock_flags);
1770 		if (!ret && monitor->is_restart)
1771 			restart_module(dev);
1772 		/* waiting for other working modules if the ispp needs a restart */
1773 		if (monitor->is_restart &&
1774 		    !monitor->monitoring_module &&
1775 		    !completion_done(&monitor->cmpl))
1776 			complete(&monitor->cmpl);
1777 	}
1778 	v4l2_dbg(1, rkispp_debug, &dev->v4l2_dev,
1779 		 "%s module:0x%x exit\n", __func__, m_monitor->module);
1780 }
1781 
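/* set up the per-module restart watchdogs: work items, completions and lock */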
1782 static void monitor_init(struct rkispp_device *dev)
1783 {
1784 	struct rkispp_stream_vdev *vdev = &dev->stream_vdev;
1785 	struct rkispp_monitor *monitor = &vdev->monitor;
1786 
1787 	monitor->tnr.dev = dev;
1788 	monitor->nr.dev = dev;
1789 	monitor->fec.dev = dev;
1790 	monitor->tnr.module = MONITOR_TNR;
1791 	monitor->nr.module = MONITOR_NR;
1792 	monitor->fec.module = MONITOR_FEC;
1793 	INIT_WORK(&monitor->tnr.work, restart_monitor);
1794 	INIT_WORK(&monitor->nr.work, restart_monitor);
1795 	INIT_WORK(&monitor->fec.work, restart_monitor);
1796 	init_completion(&monitor->tnr.cmpl);
1797 	init_completion(&monitor->nr.cmpl);
1798 	init_completion(&monitor->fec.cmpl);
1799 	init_completion(&monitor->cmpl);
1800 	spin_lock_init(&monitor->lock);
1801 	monitor->is_restart = false;
1802 }
1803 
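/*
 * rkispp_fec_do_early - hrtimer callback used to start FEC before NR has
 * fully finished. Re-arms itself (500us) while NR is working but has not
 * yet produced any output lines, otherwise kicks fec_work_event().
 */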
1804 static enum hrtimer_restart rkispp_fec_do_early(struct hrtimer *timer)
1805 {
1806 	struct rkispp_stream_vdev *vdev =
1807 		container_of(timer, struct rkispp_stream_vdev, fec_qst);
1808 	struct rkispp_stream *stream = &vdev->stream[0];
1809 	struct rkispp_device *dev = stream->isppdev;
1810 	void __iomem *base = dev->hw_dev->base_addr;
1811 	enum hrtimer_restart ret = HRTIMER_NORESTART;
1812 	u32 ycnt, tile = readl(base + RKISPP_CTRL_SYS_CTL_STA0);
1813 	u32 working = readl(base + RKISPP_CTRL_SYS_STATUS);
1814 	u64 ns = ktime_get_ns();
1815 	u32 time;
1816 
1817 	working &= NR_WORKING;
1818 	tile &= NR_TILE_LINE_CNT_MASK;
1819 	ycnt = tile >> 8;
1820 	time = (u32)(ns - vdev->nr.dbg.timestamp);
1821 	if (dev->ispp_sdev.state == ISPP_STOP) {
1822 		vdev->is_done_early = false;
1823 		goto end;
1824 	} else if (working && !ycnt) {
1825 		hrtimer_forward(timer, timer->base->get_time(), ns_to_ktime(500000));
1826 		ret = HRTIMER_RESTART;
1827 	} else {
1828 		v4l2_dbg(3, rkispp_debug, &dev->v4l2_dev,
1829 			 "%s seq:%d ycnt:%d time:%dus\n",
1830 			 __func__, vdev->nr.dbg.id, ycnt * 128, time / 1000);
1831 		vdev->stream_ops->fec_work_event(dev, NULL, false, true);
1832 	}
1833 end:
1834 	return ret;
1835 }
1836 
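/*
 * rkispp_isr - ISPP interrupt dispatcher
 *
 * Records TNR/NR errors for the monitor, completes the watchdog waits,
 * updates per-module timing statistics, advances the frame sequence in
 * quick mode, handles stream frame-end/stop events and finally triggers
 * the force-update path (or the deferred irq_work on the first NR frame).
 */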
1837 void rkispp_isr(u32 mis_val, struct rkispp_device *dev)
1838 {
1839 	struct rkispp_stream_vdev *vdev;
1840 	struct rkispp_stream *stream;
1841 	u32 i, nr_err = NR_LOST_ERR | FBCH_EMPTY_NR |
1842 		FBCD_DEC_ERR_NR | BUS_ERR_NR;
1843 	u32 tnr_err = TNR_LOST_ERR | FBCH_EMPTY_TNR |
1844 		FBCD_DEC_ERR_TNR | BUS_ERR_TNR;
1845 	u64 ns = ktime_get_ns();
1846 
1847 	v4l2_dbg(3, rkispp_debug, &dev->v4l2_dev,
1848 		 "isr:0x%x\n", mis_val);
1849 
1850 	vdev = &dev->stream_vdev;
1851 	dev->isr_cnt++;
1852 	if (mis_val & (tnr_err | nr_err)) {
1853 		if (mis_val & tnr_err)
1854 			vdev->monitor.tnr.is_err = true;
1855 		if (mis_val & nr_err)
1856 			vdev->monitor.nr.is_err = true;
1857 		dev->isr_err_cnt++;
1858 		v4l2_err(&dev->v4l2_dev,
1859 			 "ispp err:0x%x, seq:%d\n",
1860 			 mis_val, dev->ispp_sdev.frm_sync_seq);
1861 	}
1862 
1863 	if (mis_val & TNR_INT) {
1864 		if (vdev->monitor.is_en) {
1865 			vdev->monitor.monitoring_module &= ~MONITOR_TNR;
1866 			if (!completion_done(&vdev->monitor.tnr.cmpl))
1867 				complete(&vdev->monitor.tnr.cmpl);
1868 		}
1869 		vdev->tnr.dbg.interval = ns - vdev->tnr.dbg.timestamp;
1870 	}
1871 	if (mis_val & NR_INT) {
1872 		if (vdev->monitor.is_en) {
1873 			vdev->monitor.monitoring_module &= ~MONITOR_NR;
1874 			if (!completion_done(&vdev->monitor.nr.cmpl))
1875 				complete(&vdev->monitor.nr.cmpl);
1876 		}
1877 		vdev->nr.dbg.interval = ns - vdev->nr.dbg.timestamp;
1878 	}
1879 	if (mis_val & FEC_INT) {
1880 		if (vdev->monitor.is_en) {
1881 			vdev->monitor.monitoring_module &= ~MONITOR_FEC;
1882 			if (!completion_done(&vdev->monitor.fec.cmpl))
1883 				complete(&vdev->monitor.fec.cmpl);
1884 		}
1885 		vdev->fec.dbg.interval = ns - vdev->fec.dbg.timestamp;
1886 	}
1887 
1888 	if (mis_val & (CMD_TNR_ST_DONE | CMD_NR_SHP_ST_DONE) &&
1889 	    (dev->isp_mode & ISP_ISPP_QUICK))
1890 		++dev->ispp_sdev.frm_sync_seq;
1891 
1892 	if (mis_val & TNR_INT) {
1893 		if (rkispp_read(dev, RKISPP_TNR_CTRL) & SW_TNR_1ST_FRM)
1894 			rkispp_clear_bits(dev, RKISPP_TNR_CTRL, SW_TNR_1ST_FRM);
1895 		rkispp_stats_isr(&dev->stats_vdev[STATS_VDEV_TNR]);
1896 	}
1897 	if (mis_val & NR_INT)
1898 		rkispp_stats_isr(&dev->stats_vdev[STATS_VDEV_NR]);
1899 
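	/* per-stream frame-end and deferred stop handling */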
1900 	for (i = 0; i <= STREAM_S2; i++) {
1901 		stream = &vdev->stream[i];
1902 
1903 		if (!stream->streaming || !stream->is_cfg ||
1904 		    !(mis_val & INT_FRAME(stream)))
1905 			continue;
1906 		if (stream->stopping &&
1907 		    stream->ops->is_stopped &&
1908 		    (stream->ops->is_stopped(stream) ||
1909 		     dev->ispp_sdev.state == ISPP_STOP)) {
1910 			stream->stopping = false;
1911 			stream->streaming = false;
1912 			stream->is_upd = false;
1913 			wake_up(&stream->done);
1914 		} else if (i != STREAM_II) {
1915 			rkispp_frame_end(stream, FRAME_IRQ);
1916 		}
1917 	}
1918 
1919 	if (mis_val & NR_INT && dev->hw_dev->is_first) {
1920 		dev->mis_val = mis_val;
1921 		INIT_WORK(&dev->irq_work, irq_work);
1922 		schedule_work(&dev->irq_work);
1923 	} else {
1924 		vdev->stream_ops->check_to_force_update(dev, mis_val);
1925 	}
1926 }
1927 
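/*
 * rkispp_register_stream_vdevs - create the ISPP stream video devices
 *
 * Initializes the buffer lists, locks and (on ISPP v10) the early FEC and
 * frame-done timers, then registers one video device per stream. On failure
 * the streams registered so far are unregistered again.
 */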
1928 int rkispp_register_stream_vdevs(struct rkispp_device *dev)
1929 {
1930 	struct rkispp_stream_vdev *stream_vdev;
1931 	struct rkispp_stream *stream;
1932 	struct video_device *vdev;
1933 	char *vdev_name;
1934 	int i, j, ret = 0;
1935 
1936 	stream_vdev = &dev->stream_vdev;
1937 	memset(stream_vdev, 0, sizeof(*stream_vdev));
1938 	atomic_set(&stream_vdev->refcnt, 0);
1939 	INIT_LIST_HEAD(&stream_vdev->tnr.list_rd);
1940 	INIT_LIST_HEAD(&stream_vdev->tnr.list_wr);
1941 	INIT_LIST_HEAD(&stream_vdev->tnr.list_rpt);
1942 	INIT_LIST_HEAD(&stream_vdev->nr.list_rd);
1943 	INIT_LIST_HEAD(&stream_vdev->nr.list_wr);
1944 	INIT_LIST_HEAD(&stream_vdev->nr.list_rpt);
1945 	INIT_LIST_HEAD(&stream_vdev->fec.list_rd);
1946 	spin_lock_init(&stream_vdev->tnr.buf_lock);
1947 	spin_lock_init(&stream_vdev->nr.buf_lock);
1948 	spin_lock_init(&stream_vdev->fec.buf_lock);
1949 	stream_vdev->tnr.is_buf_init = false;
1950 	stream_vdev->nr.is_buf_init = false;
1951 
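	/* version specific setup: stream count, stream ops, v10-only timers */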
1952 	if (dev->ispp_ver == ISPP_V10) {
1953 		dev->stream_max = STREAM_MAX;
1954 		rkispp_stream_init_ops_v10(stream_vdev);
1955 		hrtimer_init(&stream_vdev->fec_qst, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1956 		stream_vdev->fec_qst.function = rkispp_fec_do_early;
1957 		hrtimer_init(&stream_vdev->frame_qst, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1958 		stream_vdev->frame_qst.function = stream_vdev->stream_ops->rkispp_frame_done_early;
1959 		dev->hw_dev->pool[0].group_buf_max = GROUP_BUF_MAX;
1960 	} else if (dev->ispp_ver == ISPP_V20) {
1961 		dev->stream_max = STREAM_VIR + 1;
1962 		rkispp_stream_init_ops_v20(stream_vdev);
1963 		dev->hw_dev->pool[0].group_buf_max = GROUP_BUF_GAIN;
1964 	}
1965 	for (i = 0; i < dev->stream_max; i++) {
1966 		stream = &stream_vdev->stream[i];
1967 		stream->id = i;
1968 		stream->isppdev = dev;
1969 		INIT_LIST_HEAD(&stream->buf_queue);
1970 		init_waitqueue_head(&stream->done);
1971 		spin_lock_init(&stream->vbq_lock);
1972 		vdev = &stream->vnode.vdev;
1973 		switch (i) {
1974 		case STREAM_II:
1975 			vdev_name = II_VDEV_NAME;
1976 			stream->type = STREAM_INPUT;
1977 			stream->ops = &input_stream_ops;
1978 			stream->config = &input_config;
1979 			break;
1980 		case STREAM_MB:
1981 			vdev_name = MB_VDEV_NAME;
1982 			stream->type = STREAM_OUTPUT;
1983 			stream->ops = &mb_stream_ops;
1984 			stream->config = &mb_config;
1985 			break;
1986 		case STREAM_S0:
1987 			vdev_name = S0_VDEV_NAME;
1988 			stream->type = STREAM_OUTPUT;
1989 			stream->ops = &scal_stream_ops;
1990 			stream->config = &scl0_config;
1991 			break;
1992 		case STREAM_S1:
1993 			vdev_name = S1_VDEV_NAME;
1994 			stream->type = STREAM_OUTPUT;
1995 			stream->ops = &scal_stream_ops;
1996 			stream->config = &scl1_config;
1997 			break;
1998 		case STREAM_S2:
1999 			vdev_name = S2_VDEV_NAME;
2000 			stream->type = STREAM_OUTPUT;
2001 			stream->ops = &scal_stream_ops;
2002 			stream->config = &scl2_config;
2003 			break;
2004 		case STREAM_VIR:
2005 			vdev_name = VIR_VDEV_NAME;
2006 			stream->type = STREAM_OUTPUT;
2007 			stream->config = &input_config;
2008 			stream->ops = NULL;
2009 			break;
2010 		default:
2011 			v4l2_err(&dev->v4l2_dev, "Invalid stream:%d\n", i);
2012 			return -EINVAL;
2013 		}
2014 		strlcpy(vdev->name, vdev_name, sizeof(vdev->name));
2015 		ret = rkispp_register_stream_video(stream);
2016 		if (ret < 0)
2017 			goto err;
2018 	}
2019 	monitor_init(dev);
2020 	return 0;
2021 err:
2022 	for (j = 0; j < i; j++) {
2023 		stream = &stream_vdev->stream[j];
2024 		rkispp_unregister_stream_video(stream);
2025 	}
2026 	return ret;
2027 }
2028 
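/* unregister all stream video devices created above */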
2029 void rkispp_unregister_stream_vdevs(struct rkispp_device *dev)
2030 {
2031 	struct rkispp_stream_vdev *stream_vdev;
2032 	struct rkispp_stream *stream;
2033 	int i;
2034 
2035 	stream_vdev = &dev->stream_vdev;
2036 	for (i = 0; i < dev->stream_max; i++) {
2037 		stream = &stream_vdev->stream[i];
2038 		rkispp_unregister_stream_video(stream);
2039 	}
2040 }
2041