xref: /OK3568_Linux_fs/kernel/drivers/media/platform/rockchip/cif/cif-scale.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020 Rockchip Electronics Co., Ltd. */
3 
4 #include <linux/kfifo.h>
5 #include <media/v4l2-common.h>
6 #include <media/v4l2-ioctl.h>
7 #include <media/videobuf2-core.h>
8 #include <media/videobuf2-vmalloc.h>
9 #include <media/videobuf2-dma-contig.h>
10 #include <media/videobuf2-dma-sg.h>
11 #include <linux/of.h>
12 #include <linux/of_gpio.h>
13 #include <linux/of_graph.h>
14 #include <linux/of_platform.h>
15 #include <linux/of_reserved_mem.h>
16 #include <media/v4l2-event.h>
17 #include "dev.h"
18 #include "regs.h"
19 #include "mipi-csi2.h"
20 #include <media/v4l2-fwnode.h>
21 #include <linux/pm_runtime.h>
22 
23 #define MEMORY_ALIGN_ROUND_UP_HEIGHT		16
24 
25 #define SCALE_MIN_WIDTH		4
26 #define SCALE_MIN_HEIGHT	4
27 #define SCALE_OUTPUT_STEP_WISE	1
28 #define CIF_SCALE_REQ_BUFS_MIN	3
29 
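/*
 * Capture formats the scale channels can output: the four 16-bit bayer
 * orders, each delivered as a single memory plane at 16 bits per pixel.
 */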
30 static const struct cif_output_fmt scale_out_fmts[] = {
31 	{
32 		.fourcc = V4L2_PIX_FMT_SRGGB16,
33 		.cplanes = 1,
34 		.mplanes = 1,
35 		.bpp = { 16 },
36 		.raw_bpp = 16,
37 		.fmt_type = CIF_FMT_TYPE_RAW,
38 	}, {
39 		.fourcc = V4L2_PIX_FMT_SGRBG16,
40 		.cplanes = 1,
41 		.mplanes = 1,
42 		.bpp = { 16 },
43 		.raw_bpp = 16,
44 		.fmt_type = CIF_FMT_TYPE_RAW,
45 	}, {
46 		.fourcc = V4L2_PIX_FMT_SGBRG16,
47 		.cplanes = 1,
48 		.mplanes = 1,
49 		.bpp = { 16 },
50 		.raw_bpp = 16,
51 		.fmt_type = CIF_FMT_TYPE_RAW,
52 	}, {
53 		.fourcc = V4L2_PIX_FMT_SBGGR16,
54 		.cplanes = 1,
55 		.mplanes = 1,
56 		.bpp = { 16 },
57 		.raw_bpp = 16,
58 		.fmt_type = CIF_FMT_TYPE_RAW,
59 	}
60 };
61 
62 static int rkcif_scale_enum_fmt_vid_cap(struct file *file, void *priv,
63 					struct v4l2_fmtdesc *f)
64 {
65 	const struct cif_output_fmt *fmt = NULL;
66 
67 	if (f->index >= ARRAY_SIZE(scale_out_fmts))
68 		return -EINVAL;
69 	fmt = &scale_out_fmts[f->index];
70 	f->pixelformat = fmt->fourcc;
71 	return 0;
72 }
73 
74 static int rkcif_scale_g_fmt_vid_cap_mplane(struct file *file, void *priv,
75 					    struct v4l2_format *f)
76 {
77 	struct rkcif_scale_vdev *scale_vdev = video_drvdata(file);
78 
79 	f->fmt.pix_mp = scale_vdev->pixm;
80 	return 0;
81 }
82 
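/*
 * Round the per-pixel storage width up to a whole multiple of the RV1126
 * raw store unit (CIF_RAW_STORED_BIT_WIDTH_RV1126) for the supported
 * 16-bit bayer formats. Unsupported fourccs return 0.
 */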
83 static u32 rkcif_scale_align_bits_per_pixel(struct rkcif_device *cif_dev,
84 					    const struct cif_output_fmt *fmt,
85 					    int plane_index)
86 {
87 	u32 bpp = 0, i;
88 
89 	if (fmt) {
90 		switch (fmt->fourcc) {
91 		case V4L2_PIX_FMT_SBGGR16:
92 		case V4L2_PIX_FMT_SGBRG16:
93 		case V4L2_PIX_FMT_SGRBG16:
94 		case V4L2_PIX_FMT_SRGGB16:
95 			bpp = max(fmt->bpp[plane_index], (u8)CIF_RAW_STORED_BIT_WIDTH_RV1126);
96 			for (i = 1; i < 5; i++) {
97 				if (i * CIF_RAW_STORED_BIT_WIDTH_RV1126 >= bpp) {
98 					bpp = i * CIF_RAW_STORED_BIT_WIDTH_RV1126;
99 					break;
100 				}
101 			}
102 			break;
103 		default:
104 			v4l2_err(&cif_dev->v4l2_dev, "fourcc: %d is not supported!\n",
105 				 fmt->fourcc);
106 			break;
107 		}
108 	}
109 
110 	return bpp;
111 }
112 
113 
114 static const struct
115 cif_output_fmt *rkcif_scale_find_output_fmt(u32 pixelfmt)
116 {
117 	const struct cif_output_fmt *fmt;
118 	u32 i;
119 
120 	for (i = 0; i < ARRAY_SIZE(scale_out_fmts); i++) {
121 		fmt = &scale_out_fmts[i];
122 		if (fmt->fourcc == pixelfmt)
123 			return fmt;
124 	}
125 
126 	return NULL;
127 }
128 
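/*
 * Negotiate the capture format for a scale channel: query the terminal
 * sensor (and its crop bounds) for the source resolution, pick the
 * 8x/16x/32x scale mode from the requested width, then derive the aligned
 * output size and single-plane layout. When @try is false the result is
 * stored in the vdev as the active format.
 */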
129 static int rkcif_scale_set_fmt(struct rkcif_scale_vdev *scale_vdev,
130 			       struct v4l2_pix_format_mplane *pixm,
131 			       bool try)
132 {
133 	struct rkcif_stream *stream = scale_vdev->stream;
134 	struct rkcif_device *cif_dev = scale_vdev->cifdev;
135 	struct v4l2_subdev_selection input_sel;
136 	struct v4l2_subdev_format fmt_src;
137 	const struct cif_output_fmt *fmt;
138 	unsigned int imagesize = 0;
139 	int bpl, size, bpp;
140 	int scale_times = 0;
141 	u32 scale_ratio = 0;
142 	u32 width = 640;
143 	u32 height = 480;
144 	int ret = 0;
145 
146 	if (!cif_dev->terminal_sensor.sd)
147 		rkcif_update_sensor_info(&cif_dev->stream[0]);
148 
149 	if (cif_dev->terminal_sensor.sd) {
150 		fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
151 		fmt_src.pad = 0;
152 		ret = v4l2_subdev_call(cif_dev->terminal_sensor.sd, pad, get_fmt, NULL, &fmt_src);
153 		if (ret) {
154 			v4l2_err(&scale_vdev->cifdev->v4l2_dev,
155 				 "%s: get sensor format failed\n", __func__);
156 			return ret;
157 		}
158 
159 		input_sel.target = V4L2_SEL_TGT_CROP_BOUNDS;
160 		input_sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
161 		input_sel.pad = 0;
162 		ret = v4l2_subdev_call(cif_dev->terminal_sensor.sd,
163 				       pad, get_selection, NULL,
164 				       &input_sel);
165 		if (!ret) {
166 			fmt_src.format.width = input_sel.r.width;
167 			fmt_src.format.height = input_sel.r.height;
168 		}
169 		scale_vdev->src_res.width = fmt_src.format.width;
170 		scale_vdev->src_res.height = fmt_src.format.height;
171 	}
172 	fmt = rkcif_scale_find_output_fmt(pixm->pixelformat);
173 	if (!fmt) {
174 		v4l2_err(&scale_vdev->cifdev->v4l2_dev,
175 			"source channel format is not bayer raw, scaling is not supported\n");
176 		return -EINVAL;
177 	}
178 	if (scale_vdev->src_res.width && scale_vdev->src_res.height) {
179 		width = scale_vdev->src_res.width;
180 		height = scale_vdev->src_res.height;
181 	}
182 	scale_ratio = width / pixm->width;
183 	if (scale_ratio <= 8) {
184 		scale_vdev->scale_mode = SCALE_8TIMES;
185 		scale_times = 8;
186 	} else if (scale_ratio <= 16) {
187 		scale_vdev->scale_mode = SCALE_16TIMES;
188 		scale_times = 16;
189 	} else {
190 		scale_vdev->scale_mode = SCALE_32TIMES;
191 		scale_times = 32;
192 	}
193 	/* align the source resolution to a multiple of (scale_times * 2) */
194 	width = ALIGN(width, scale_times * 2);
195 	pixm->width = width  / (scale_times * 2) * 2;
196 	pixm->height = height / (scale_times * 2) * 2;
197 	pixm->num_planes = fmt->mplanes;
198 	pixm->field = V4L2_FIELD_NONE;
199 	pixm->quantization = V4L2_QUANTIZATION_DEFAULT;
200 
201 	bpp = rkcif_scale_align_bits_per_pixel(cif_dev, fmt, 0);
202 	bpl = pixm->width * bpp / CIF_RAW_STORED_BIT_WIDTH_RV1126;
203 	bpl = ALIGN(bpl, 8);
204 	size = bpl * pixm->height;
205 	imagesize += size;
206 
207 	v4l2_dbg(1, rkcif_debug, &stream->cifdev->v4l2_dev,
208 		 "%s C-Plane %i size: %d, Total imagesize: %d\n",
209 		 __func__, 0, size, imagesize);
210 
211 	if (fmt->mplanes == 1) {
212 		pixm->plane_fmt[0].bytesperline = bpl;
213 		pixm->plane_fmt[0].sizeimage = imagesize;
214 	}
215 
216 	if (!try) {
217 		scale_vdev->scale_out_fmt = fmt;
218 		scale_vdev->pixm = *pixm;
219 
220 		v4l2_info(&stream->cifdev->v4l2_dev,
221 			  "%s: req(%d, %d) src out(%d, %d)\n", __func__,
222 			  pixm->width, pixm->height,
223 			  scale_vdev->src_res.width, scale_vdev->src_res.height);
224 	}
225 	return 0;
226 }
227 
228 static int rkcif_scale_s_fmt_vid_cap_mplane(struct file *file,
229 					    void *priv, struct v4l2_format *f)
230 {
231 	struct rkcif_scale_vdev *scale_vdev = video_drvdata(file);
232 	int ret = 0;
233 
234 	if (vb2_is_busy(&scale_vdev->vnode.buf_queue)) {
235 		v4l2_err(&scale_vdev->cifdev->v4l2_dev, "%s queue busy\n", __func__);
236 		return -EBUSY;
237 	}
238 
239 	ret = rkcif_scale_set_fmt(scale_vdev, &f->fmt.pix_mp, false);
240 
241 	return ret;
242 }
243 
244 static int rkcif_scale_querycap(struct file *file,
245 				void *priv, struct v4l2_capability *cap)
246 {
247 	struct rkcif_scale_vdev *scale_vdev = video_drvdata(file);
248 	struct device *dev = scale_vdev->cifdev->dev;
249 
250 	strscpy(cap->driver, dev->driver->name, sizeof(cap->driver));
251 	strscpy(cap->card, dev->driver->name, sizeof(cap->card));
252 	snprintf(cap->bus_info, sizeof(cap->bus_info),
253 		 "platform:%s", dev_name(dev));
254 	return 0;
255 }
256 
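/*
 * Driver-private ioctls: RKCIF_CMD_GET_SCALE_BLC / RKCIF_CMD_SET_SCALE_BLC
 * read or update the per-channel black level (BLC) values that are later
 * written to the scaler BLC register in rkcif_scale_channel_set().
 */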
257 static long rkcif_scale_ioctl_default(struct file *file, void *fh,
258 				    bool valid_prio, unsigned int cmd, void *arg)
259 {
260 	struct rkcif_scale_vdev *scale_vdev = video_drvdata(file);
261 	struct rkcif_device *dev = scale_vdev->cifdev;
262 	struct bayer_blc *pblc;
263 
264 	switch (cmd) {
265 	case RKCIF_CMD_GET_SCALE_BLC:
266 		pblc = (struct bayer_blc *)arg;
267 		*pblc = scale_vdev->blc;
268 		v4l2_dbg(1, rkcif_debug, &dev->v4l2_dev, "get scale blc %d %d %d %d\n",
269 			 pblc->pattern00, pblc->pattern01, pblc->pattern02, pblc->pattern03);
270 		break;
271 	case RKCIF_CMD_SET_SCALE_BLC:
272 		pblc = (struct bayer_blc *)arg;
273 		scale_vdev->blc = *pblc;
274 		v4l2_dbg(1, rkcif_debug, &dev->v4l2_dev, "set scale blc %d %d %d %d\n",
275 			 pblc->pattern00, pblc->pattern01, pblc->pattern02, pblc->pattern03);
276 		break;
277 	default:
278 		return -EINVAL;
279 	}
280 
281 	return 0;
282 }
283 
284 static int rkcif_scale_enum_input(struct file *file, void *priv,
285 				  struct v4l2_input *input)
286 {
287 
288 	if (input->index > 0)
289 		return -EINVAL;
290 
291 	input->type = V4L2_INPUT_TYPE_CAMERA;
292 	strscpy(input->name, "Camera", sizeof(input->name));
293 
294 	return 0;
295 }
296 
297 static int rkcif_scale_try_fmt_vid_cap_mplane(struct file *file, void *fh,
298 					      struct v4l2_format *f)
299 {
300 	struct rkcif_scale_vdev *scale_vdev = video_drvdata(file);
301 	int ret = 0;
302 
303 	ret = rkcif_scale_set_fmt(scale_vdev, &f->fmt.pix_mp, true);
304 
305 	return ret;
306 }
307 
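/*
 * Report a continuous frame-interval range whose minimum is the terminal
 * sensor's current frame interval; sensors that do not implement
 * g_frame_interval fall back to 1/30.
 */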
308 static int rkcif_scale_enum_frameintervals(struct file *file, void *fh,
309 					   struct v4l2_frmivalenum *fival)
310 {
311 	struct rkcif_scale_vdev *scale_vdev = video_drvdata(file);
312 	struct rkcif_device *dev = scale_vdev->cifdev;
313 	struct rkcif_sensor_info *sensor = &dev->terminal_sensor;
314 	struct v4l2_subdev_frame_interval fi;
315 	int ret;
316 
317 	if (fival->index != 0)
318 		return -EINVAL;
319 
320 	if (!sensor || !sensor->sd) {
321 		/* TODO: active_sensor is NULL if using DMARX path */
322 		v4l2_err(&dev->v4l2_dev, "%s Not active sensor\n", __func__);
323 		return -ENODEV;
324 	}
325 
326 	ret = v4l2_subdev_call(sensor->sd, video, g_frame_interval, &fi);
327 	if (ret && ret != -ENOIOCTLCMD) {
328 		return ret;
329 	} else if (ret == -ENOIOCTLCMD) {
330 		/* Set a default value for sensors that do not implement this ioctl */
331 		fi.interval.numerator = 1;
332 		fi.interval.denominator = 30;
333 	}
334 
335 	fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
336 	fival->stepwise.step.numerator = 1;
337 	fival->stepwise.step.denominator = 1;
338 	fival->stepwise.max.numerator = 1;
339 	fival->stepwise.max.denominator = 1;
340 	fival->stepwise.min.numerator = fi.interval.numerator;
341 	fival->stepwise.min.denominator = fi.interval.denominator;
342 
343 	return 0;
344 }
345 
346 static int rkcif_scale_enum_framesizes(struct file *file, void *priv,
347 				       struct v4l2_frmsizeenum *fsize)
348 {
349 	struct v4l2_frmsize_discrete *s = &fsize->discrete;
350 	struct rkcif_scale_vdev *scale_vdev = video_drvdata(file);
351 	struct rkcif_device *dev = scale_vdev->cifdev;
352 	struct v4l2_rect input_rect;
353 	struct rkcif_sensor_info *terminal_sensor = &dev->terminal_sensor;
354 	struct csi_channel_info csi_info;
355 	int scale_times = 0;
356 
357 	if (fsize->index >= RKCIF_SCALE_ENUM_SIZE_MAX)
358 		return -EINVAL;
359 
360 	if (!rkcif_scale_find_output_fmt(fsize->pixel_format))
361 		return -EINVAL;
362 
363 	input_rect.width = RKCIF_DEFAULT_WIDTH;
364 	input_rect.height = RKCIF_DEFAULT_HEIGHT;
365 
366 	if (terminal_sensor && terminal_sensor->sd)
367 		rkcif_get_input_fmt(dev,
368 				    &input_rect, 0, &csi_info);
369 
370 	switch (fsize->index) {
371 	case SCALE_8TIMES:
372 		scale_times = 8;
373 		break;
374 	case SCALE_16TIMES:
375 		scale_times = 16;
376 		break;
377 	case SCALE_32TIMES:
378 		scale_times = 32;
379 		break;
380 	default:
381 		scale_times = 32;
382 		break;
383 	}
384 	fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
385 	s->width = input_rect.width  / (scale_times * 2) * 2;
386 	s->height = input_rect.height / (scale_times * 2) * 2;
387 
388 	return 0;
389 }
390 
391 /* CIF scale video device IOCTLs */
392 static const struct v4l2_ioctl_ops rkcif_scale_ioctl = {
393 	.vidioc_reqbufs = vb2_ioctl_reqbufs,
394 	.vidioc_querybuf = vb2_ioctl_querybuf,
395 	.vidioc_create_bufs = vb2_ioctl_create_bufs,
396 	.vidioc_qbuf = vb2_ioctl_qbuf,
397 	.vidioc_dqbuf = vb2_ioctl_dqbuf,
398 	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
399 	.vidioc_expbuf = vb2_ioctl_expbuf,
400 	.vidioc_streamon = vb2_ioctl_streamon,
401 	.vidioc_streamoff = vb2_ioctl_streamoff,
402 	.vidioc_enum_input = rkcif_scale_enum_input,
403 	.vidioc_enum_fmt_vid_cap = rkcif_scale_enum_fmt_vid_cap,
404 	.vidioc_g_fmt_vid_cap_mplane = rkcif_scale_g_fmt_vid_cap_mplane,
405 	.vidioc_s_fmt_vid_cap_mplane = rkcif_scale_s_fmt_vid_cap_mplane,
406 	.vidioc_try_fmt_vid_cap_mplane = rkcif_scale_try_fmt_vid_cap_mplane,
407 	.vidioc_querycap = rkcif_scale_querycap,
408 	.vidioc_enum_frameintervals = rkcif_scale_enum_frameintervals,
409 	.vidioc_enum_framesizes = rkcif_scale_enum_framesizes,
410 	.vidioc_default = rkcif_scale_ioctl_default,
411 };
412 
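/*
 * On open: refresh the terminal sensor info, take a runtime PM reference
 * on the CIF device and a pipeline PM reference on the video entity. The
 * references are dropped again in rkcif_scale_fop_release().
 */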
413 static int rkcif_scale_fh_open(struct file *file)
414 {
415 	struct video_device *vdev = video_devdata(file);
416 	struct rkcif_vdev_node *vnode = vdev_to_node(vdev);
417 	struct rkcif_scale_vdev *scale_vdev = to_rkcif_scale_vdev(vnode);
418 	struct rkcif_device *cifdev = scale_vdev->cifdev;
419 	int ret;
420 
421 	ret = rkcif_update_sensor_info(scale_vdev->stream);
422 	if (ret < 0) {
423 		v4l2_err(vdev,
424 			 "update sensor info failed %d\n",
425 			 ret);
426 
427 		return ret;
428 	}
429 
430 	ret = pm_runtime_resume_and_get(cifdev->dev);
431 	if (ret < 0)
432 		v4l2_err(&cifdev->v4l2_dev, "Failed to get runtime pm, %d\n",
433 			 ret);
434 
435 	ret = v4l2_fh_open(file);
436 	if (!ret) {
437 		ret = v4l2_pipeline_pm_get(&vnode->vdev.entity);
438 		if (ret < 0)
439 			vb2_fop_release(file);
440 	}
441 
442 	return ret;
443 }
444 
445 static int rkcif_scale_fop_release(struct file *file)
446 {
447 	struct video_device *vdev = video_devdata(file);
448 	struct rkcif_vdev_node *vnode = vdev_to_node(vdev);
449 	struct rkcif_scale_vdev *scale_vdev = to_rkcif_scale_vdev(vnode);
450 	struct rkcif_device *cifdev = scale_vdev->cifdev;
451 	int ret;
452 
453 	ret = vb2_fop_release(file);
454 	if (!ret)
455 		v4l2_pipeline_pm_put(&vnode->vdev.entity);
456 
457 	pm_runtime_put_sync(cifdev->dev);
458 	return ret;
459 }
460 
461 struct v4l2_file_operations rkcif_scale_fops = {
462 	.mmap = vb2_fop_mmap,
463 	.unlocked_ioctl = video_ioctl2,
464 	.poll = vb2_fop_poll,
465 	.open = rkcif_scale_fh_open,
466 	.release = rkcif_scale_fop_release
467 };
468 
469 static int rkcif_scale_vb2_queue_setup(struct vb2_queue *queue,
470 				       unsigned int *num_buffers,
471 				       unsigned int *num_planes,
472 				       unsigned int sizes[],
473 				       struct device *alloc_ctxs[])
474 {
475 	struct rkcif_scale_vdev *scale_vdev = queue->drv_priv;
476 	struct rkcif_device *cif_dev = scale_vdev->cifdev;
477 	const struct v4l2_pix_format_mplane *pixm = NULL;
478 	const struct cif_output_fmt *cif_fmt;
479 	u32 i;
480 	const struct v4l2_plane_pix_format *plane_fmt;
481 
482 	pixm = &scale_vdev->pixm;
483 	cif_fmt = scale_vdev->scale_out_fmt;
484 	*num_planes = cif_fmt->mplanes;
485 
486 	for (i = 0; i < cif_fmt->mplanes; i++) {
487 		plane_fmt = &pixm->plane_fmt[i];
488 		sizes[i] = plane_fmt->sizeimage;
489 	}
490 
491 	v4l2_dbg(1, rkcif_debug, &cif_dev->v4l2_dev, "%s count %d, size %d\n",
492 		 v4l2_type_names[queue->type], *num_buffers, sizes[0]);
493 	return 0;
494 
495 }
496 
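/*
 * Queue a vb2 buffer: record the DMA address of each memory plane
 * (scatter-gather or contiguous, depending on the hw memory ops), derive
 * the addresses of any extra colour planes that share one memory plane,
 * and add the buffer to buf_head under vbq_lock for the ISR to pick up.
 */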
497 static void rkcif_scale_vb2_buf_queue(struct vb2_buffer *vb)
498 {
499 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
500 	struct rkcif_buffer *cifbuf = to_rkcif_buffer(vbuf);
501 	struct vb2_queue *queue = vb->vb2_queue;
502 	struct rkcif_scale_vdev *scale_vdev = queue->drv_priv;
503 	struct v4l2_pix_format_mplane *pixm = &scale_vdev->pixm;
504 	const struct cif_output_fmt *fmt = scale_vdev->scale_out_fmt;
505 	struct rkcif_hw *hw_dev = scale_vdev->cifdev->hw_dev;
506 	unsigned long lock_flags = 0;
507 	int i;
508 
509 	memset(cifbuf->buff_addr, 0, sizeof(cifbuf->buff_addr));
510 	/* If mplanes > 1, every c-plane has its own m-plane,
511 	 * otherwise, multiple c-planes are in the same m-plane
512 	 */
513 	for (i = 0; i < fmt->mplanes; i++) {
514 		void *addr = vb2_plane_vaddr(vb, i);
515 
516 		if (hw_dev->is_dma_sg_ops) {
517 			struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, i);
518 
519 			cifbuf->buff_addr[i] = sg_dma_address(sgt->sgl);
520 		} else {
521 			cifbuf->buff_addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
522 		}
523 		if (rkcif_debug && addr && !hw_dev->iommu_en) {
524 			memset(addr, 0, pixm->plane_fmt[i].sizeimage);
525 			v4l2_dbg(3, rkcif_debug, &scale_vdev->cifdev->v4l2_dev,
526 				 "Clear buffer, size: 0x%08x\n",
527 				 pixm->plane_fmt[i].sizeimage);
528 		}
529 	}
530 
531 	if (fmt->mplanes == 1) {
532 		for (i = 0; i < fmt->cplanes - 1; i++)
533 			cifbuf->buff_addr[i + 1] = cifbuf->buff_addr[i] +
534 				pixm->plane_fmt[i].bytesperline * pixm->height;
535 	}
536 	spin_lock_irqsave(&scale_vdev->vbq_lock, lock_flags);
537 	list_add_tail(&cifbuf->queue, &scale_vdev->buf_head);
538 	spin_unlock_irqrestore(&scale_vdev->vbq_lock, lock_flags);
539 }
540 
541 static int rkcif_scale_stop(struct rkcif_scale_vdev *scale_vdev)
542 {
543 	struct rkcif_device *dev = scale_vdev->cifdev;
544 	int ch = scale_vdev->ch;
545 
546 	rkcif_write_register_and(dev, CIF_REG_SCL_CH_CTRL,
547 				 ~(CIF_SCALE_EN(ch) |
548 				 CIF_SCALE_SW_SRC_CH(0x1f, ch) |
549 				 CIF_SCALE_SW_MODE(0x03, ch)));
550 	scale_vdev->state = RKCIF_STATE_READY;
551 	scale_vdev->frame_idx = 0;
552 	return 0;
553 }
554 
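/*
 * Stop streaming: signal the ISR through the 'stopping' flag, wait up to
 * one second for the channel to leave RKCIF_STATE_STREAMING (stopping the
 * hardware directly on timeout), then return every pending buffer to vb2
 * with VB2_BUF_STATE_ERROR.
 */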
555 static void rkcif_scale_vb2_stop_streaming(struct vb2_queue *vq)
556 {
557 	struct rkcif_scale_vdev *scale_vdev = vq->drv_priv;
558 	struct rkcif_device *dev = scale_vdev->cifdev;
559 	struct rkcif_buffer *buf = NULL;
560 	int ret = 0;
561 
562 	mutex_lock(&dev->scale_lock);
563 	/* Make sure no new work is queued by the ISR before we stop and drain the buffers */
564 	scale_vdev->stopping = true;
565 	ret = wait_event_timeout(scale_vdev->wq_stopped,
566 				 scale_vdev->state != RKCIF_STATE_STREAMING,
567 				 msecs_to_jiffies(1000));
568 	if (!ret) {
569 		rkcif_scale_stop(scale_vdev);
570 		scale_vdev->stopping = false;
571 	}
572 	/* release buffers */
573 	if (scale_vdev->curr_buf)
574 		list_add_tail(&scale_vdev->curr_buf->queue, &scale_vdev->buf_head);
575 
576 	if (scale_vdev->next_buf &&
577 	    scale_vdev->next_buf != scale_vdev->curr_buf)
578 		list_add_tail(&scale_vdev->next_buf->queue, &scale_vdev->buf_head);
579 	scale_vdev->curr_buf = NULL;
580 	scale_vdev->next_buf = NULL;
581 	while (!list_empty(&scale_vdev->buf_head)) {
582 		buf = list_first_entry(&scale_vdev->buf_head,
583 				       struct rkcif_buffer, queue);
584 		list_del(&buf->queue);
585 		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
586 	}
587 	mutex_unlock(&dev->scale_lock);
588 }
589 
590 static int rkcif_scale_channel_init(struct rkcif_scale_vdev *scale_vdev)
591 {
592 	struct rkcif_device *cif_dev = scale_vdev->cifdev;
593 	struct rkcif_scale_ch_info *ch_info = &scale_vdev->ch_info;
594 	struct v4l2_pix_format_mplane pixm = scale_vdev->pixm;
595 	const struct cif_output_fmt *fmt = scale_vdev->scale_out_fmt;
596 
597 	if (cif_dev->inf_id == RKCIF_DVP)
598 		scale_vdev->ch_src = SCALE_DVP;
599 	else
600 		scale_vdev->ch_src = 4 * cif_dev->csi_host_idx + scale_vdev->ch;
601 	ch_info->width = pixm.width;
602 	ch_info->height = pixm.height;
603 	ch_info->vir_width = ALIGN(ch_info->width  * fmt->bpp[0] / 8, 8);
604 	return 0;
605 }
606 
607 static enum cif_reg_index get_reg_index_of_scale_vlw(int ch)
608 {
609 	enum cif_reg_index index;
610 
611 	switch (ch) {
612 	case 0:
613 		index = CIF_REG_SCL_VLW_CH0;
614 		break;
615 	case 1:
616 		index = CIF_REG_SCL_VLW_CH1;
617 		break;
618 	case 2:
619 		index = CIF_REG_SCL_VLW_CH2;
620 		break;
621 	case 3:
622 		index = CIF_REG_SCL_VLW_CH3;
623 		break;
624 	default:
625 		index = CIF_REG_SCL_VLW_CH0;
626 		break;
627 	}
628 
629 	return index;
630 }
631 
632 static enum cif_reg_index get_reg_index_of_scale_frm0_addr(int channel_id)
633 {
634 	enum cif_reg_index index;
635 
636 	switch (channel_id) {
637 	case 0:
638 		index = CIF_REG_SCL_FRM0_ADDR_CH0;
639 		break;
640 	case 1:
641 		index = CIF_REG_SCL_FRM0_ADDR_CH1;
642 		break;
643 	case 2:
644 		index = CIF_REG_SCL_FRM0_ADDR_CH2;
645 		break;
646 	case 3:
647 		index = CIF_REG_SCL_FRM0_ADDR_CH3;
648 		break;
649 	default:
650 		index = CIF_REG_SCL_FRM0_ADDR_CH0;
651 		break;
652 	}
653 
654 	return index;
655 }
656 
657 static enum cif_reg_index get_reg_index_of_scale_frm1_addr(int channel_id)
658 {
659 	enum cif_reg_index index;
660 
661 	switch (channel_id) {
662 	case 0:
663 		index = CIF_REG_SCL_FRM1_ADDR_CH0;
664 		break;
665 	case 1:
666 		index = CIF_REG_SCL_FRM1_ADDR_CH1;
667 		break;
668 	case 2:
669 		index = CIF_REG_SCL_FRM1_ADDR_CH2;
670 		break;
671 	case 3:
672 		index = CIF_REG_SCL_FRM1_ADDR_CH3;
673 		break;
674 	default:
675 		index = CIF_REG_SCL_FRM1_ADDR_CH0;
676 		break;
677 	}
678 
679 	return index;
680 }
681 
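/*
 * Prime the ping-pong DMA addresses before streaming starts: take up to
 * two buffers from buf_head and program them into the channel's FRM0/FRM1
 * address registers.
 */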
682 static void rkcif_assign_scale_buffer_init(struct rkcif_scale_vdev *scale_vdev,
683 					   int ch)
684 {
685 	struct rkcif_device *dev = scale_vdev->stream->cifdev;
686 	u32 frm0_addr;
687 	u32 frm1_addr;
688 	unsigned long flags;
689 
690 	frm0_addr = get_reg_index_of_scale_frm0_addr(ch);
691 	frm1_addr = get_reg_index_of_scale_frm1_addr(ch);
692 
693 	spin_lock_irqsave(&scale_vdev->vbq_lock, flags);
694 
695 	if (!scale_vdev->curr_buf) {
696 		if (!list_empty(&scale_vdev->buf_head)) {
697 			scale_vdev->curr_buf = list_first_entry(&scale_vdev->buf_head,
698 							    struct rkcif_buffer,
699 							    queue);
700 			list_del(&scale_vdev->curr_buf->queue);
701 		}
702 	}
703 
704 	if (scale_vdev->curr_buf)
705 		rkcif_write_register(dev, frm0_addr,
706 				     scale_vdev->curr_buf->buff_addr[RKCIF_PLANE_Y]);
707 
708 	if (!scale_vdev->next_buf) {
709 		if (!list_empty(&scale_vdev->buf_head)) {
710 			scale_vdev->next_buf = list_first_entry(&scale_vdev->buf_head,
711 							    struct rkcif_buffer, queue);
712 			list_del(&scale_vdev->next_buf->queue);
713 		}
714 	}
715 
716 	if (scale_vdev->next_buf)
717 		rkcif_write_register(dev, frm1_addr,
718 				     scale_vdev->next_buf->buff_addr[RKCIF_PLANE_Y]);
719 
720 	spin_unlock_irqrestore(&scale_vdev->vbq_lock, flags);
721 }
722 
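/*
 * Frame-done path of the ping-pong scheme: pick the next queued buffer for
 * the half (FRM0 or FRM1) that just completed and reprogram its address
 * register. Returns -EINVAL when no buffer is available so the caller can
 * drop the frame.
 */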
723 static int rkcif_assign_scale_buffer_update(struct rkcif_scale_vdev *scale_vdev,
724 					    int channel_id)
725 {
726 	struct rkcif_device *dev = scale_vdev->cifdev;
727 	struct rkcif_buffer *buffer = NULL;
728 	u32 frm_addr;
729 	int ret = 0;
730 	unsigned long flags;
731 
732 	frm_addr = scale_vdev->frame_phase & CIF_CSI_FRAME0_READY ?
733 		   get_reg_index_of_scale_frm0_addr(channel_id) :
734 		   get_reg_index_of_scale_frm1_addr(channel_id);
735 
736 	spin_lock_irqsave(&scale_vdev->vbq_lock, flags);
737 	if (!list_empty(&scale_vdev->buf_head)) {
738 		if (scale_vdev->frame_phase == CIF_CSI_FRAME0_READY) {
739 			scale_vdev->curr_buf = list_first_entry(&scale_vdev->buf_head,
740 							    struct rkcif_buffer, queue);
741 			if (scale_vdev->curr_buf) {
742 				list_del(&scale_vdev->curr_buf->queue);
743 				buffer = scale_vdev->curr_buf;
744 			}
745 		} else if (scale_vdev->frame_phase == CIF_CSI_FRAME1_READY) {
746 			scale_vdev->next_buf = list_first_entry(&scale_vdev->buf_head,
747 							    struct rkcif_buffer, queue);
748 			if (scale_vdev->next_buf) {
749 				list_del(&scale_vdev->next_buf->queue);
750 				buffer = scale_vdev->next_buf;
751 			}
752 		}
753 	} else {
754 		buffer = NULL;
755 	}
756 	spin_unlock_irqrestore(&scale_vdev->vbq_lock, flags);
757 
758 	if (buffer) {
759 		rkcif_write_register(dev, frm_addr,
760 				     buffer->buff_addr[RKCIF_PLANE_Y]);
761 	} else {
762 		ret = -EINVAL;
763 		v4l2_info(&dev->v4l2_dev,
764 			 "not active buffer,skip frame, scale ch[%d]\n",
765 			  scale_vdev->ch);
766 	}
767 	return ret;
768 }
769 
770 static int rkcif_assign_scale_buffer_pingpong(struct rkcif_scale_vdev *scale_vdev,
771 					      int init, int channel_id)
772 {
773 	int ret = 0;
774 
775 	if (init)
776 		rkcif_assign_scale_buffer_init(scale_vdev, channel_id);
777 	else
778 		ret = rkcif_assign_scale_buffer_update(scale_vdev, channel_id);
779 	return ret;
780 }
781 
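/*
 * Program one scaler channel: make sure it is not already enabled, prime
 * the ping-pong addresses, soft-reset the channel, clear and enable its
 * interrupts, then write the press/hurry/water-line control word, the BLC
 * value, the virtual line width, and finally the source channel, scale
 * mode and enable bit.
 */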
782 static int rkcif_scale_channel_set(struct rkcif_scale_vdev *scale_vdev)
783 {
784 	struct rkcif_device *dev = scale_vdev->cifdev;
785 	u32 val = 0;
786 	u32 ch  = scale_vdev->ch;
787 
788 	val = rkcif_read_register(dev, CIF_REG_SCL_CH_CTRL);
789 	if (val & CIF_SCALE_EN(ch)) {
790 		v4l2_err(&dev->v4l2_dev, "scale_vdev[%d] has been used by other device\n", ch);
791 		return -EINVAL;
792 	}
793 
794 	rkcif_assign_scale_buffer_pingpong(scale_vdev,
795 					   RKCIF_YUV_ADDR_STATE_INIT,
796 					   ch);
797 	rkcif_write_register_or(dev, CIF_REG_SCL_CTRL, SCALE_SOFT_RESET(scale_vdev->ch));
798 
799 	rkcif_write_register_and(dev, CIF_REG_GLB_INTST,
800 				 ~(SCALE_END_INTSTAT(ch) |
801 				 SCALE_FIFO_OVERFLOW(ch)));
802 	rkcif_write_register_or(dev, CIF_REG_GLB_INTEN,
803 				(SCALE_END_INTSTAT(ch) |
804 				SCALE_FIFO_OVERFLOW(ch) |
805 				SCALE_TOISP_AXI0_ERR |
806 				SCALE_TOISP_AXI1_ERR));
807 	val = CIF_SCALE_SW_PRESS_ENABLE |
808 	      CIF_SCALE_SW_PRESS_VALUE(7) |
809 	      CIF_SCALE_SW_HURRY_ENABLE |
810 	      CIF_SCALE_SW_HURRY_VALUE(7) |
811 	      CIF_SCALE_SW_WATER_LINE(1);
812 
813 	rkcif_write_register(dev, CIF_REG_SCL_CTRL, val);
814 	val = scale_vdev->blc.pattern00 |
815 	      (scale_vdev->blc.pattern01 << 8) |
816 	      (scale_vdev->blc.pattern02 << 16) |
817 	      (scale_vdev->blc.pattern03 << 24);
818 	rkcif_write_register(dev, CIF_REG_SCL_BLC_CH0 + ch,
819 			     val);
820 	rkcif_write_register(dev, get_reg_index_of_scale_vlw(ch),
821 			     scale_vdev->ch_info.vir_width);
822 	val = CIF_SCALE_SW_SRC_CH(scale_vdev->ch_src, ch) |
823 	      CIF_SCALE_SW_MODE(scale_vdev->scale_mode, ch) |
824 	      CIF_SCALE_EN(ch);
825 	rkcif_write_register_or(dev, CIF_REG_SCL_CH_CTRL,
826 				val);
827 	return 0;
828 }
829 
830 
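/*
 * Start a scale channel under scale_lock: initialise the channel geometry,
 * program the hardware and mark the vdev as streaming. On failure, buffers
 * already assigned to the ping-pong slots and everything still on buf_head
 * are returned to vb2 in the QUEUED state.
 */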
831 int rkcif_scale_start(struct rkcif_scale_vdev *scale_vdev)
832 {
833 	int ret = 0;
834 	struct rkcif_device *dev = scale_vdev->cifdev;
835 	struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
836 
837 	mutex_lock(&dev->scale_lock);
838 	if (scale_vdev->state == RKCIF_STATE_STREAMING) {
839 		ret = -EBUSY;
840 		v4l2_err(v4l2_dev, "stream in busy state\n");
841 		goto destroy_buf;
842 	}
843 
844 	rkcif_scale_channel_init(scale_vdev);
845 	ret = rkcif_scale_channel_set(scale_vdev);
846 	if (ret)
847 		goto destroy_buf;
848 	scale_vdev->frame_idx = 0;
849 	scale_vdev->state = RKCIF_STATE_STREAMING;
850 	mutex_unlock(&dev->scale_lock);
851 	return 0;
852 
853 destroy_buf:
854 	if (scale_vdev->next_buf)
855 		vb2_buffer_done(&scale_vdev->next_buf->vb.vb2_buf,
856 				VB2_BUF_STATE_QUEUED);
857 	if (scale_vdev->curr_buf)
858 		vb2_buffer_done(&scale_vdev->curr_buf->vb.vb2_buf,
859 				VB2_BUF_STATE_QUEUED);
860 	while (!list_empty(&scale_vdev->buf_head)) {
861 		struct rkcif_buffer *buf;
862 
863 		buf = list_first_entry(&scale_vdev->buf_head,
864 				       struct rkcif_buffer, queue);
865 		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
866 		list_del(&buf->queue);
867 	}
868 	mutex_unlock(&dev->scale_lock);
869 	return ret;
870 }
871 
872 static int
873 rkcif_scale_vb2_start_streaming(struct vb2_queue *queue,
874 				unsigned int count)
875 {
876 	struct rkcif_scale_vdev *scale_vdev = queue->drv_priv;
877 	struct rkcif_stream *stream = scale_vdev->stream;
878 	int ret = 0;
879 
880 	if (stream->state == RKCIF_STATE_STREAMING) {
881 		stream->to_en_scale = true;
882 	} else {
883 		ret = rkcif_scale_start(scale_vdev);
884 		if (ret)
885 			return ret;
886 	}
887 
888 	return 0;
889 }
890 
891 static struct vb2_ops rkcif_scale_vb2_ops = {
892 	.queue_setup = rkcif_scale_vb2_queue_setup,
893 	.buf_queue = rkcif_scale_vb2_buf_queue,
894 	.wait_prepare = vb2_ops_wait_prepare,
895 	.wait_finish = vb2_ops_wait_finish,
896 	.stop_streaming = rkcif_scale_vb2_stop_streaming,
897 	.start_streaming = rkcif_scale_vb2_start_streaming,
898 };
899 
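/*
 * Common vb2 queue setup for a scale vdev: MMAP/DMABUF capture queue using
 * the memory ops selected by the hw layer, at least CIF_SCALE_REQ_BUFS_MIN
 * buffers, GFP_DMA32 allocations, and forced-contiguous DMA when
 * hw_dev->is_dma_contig is set.
 */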
900 static int rkcif_scale_init_vb2_queue(struct vb2_queue *q,
901 				      struct rkcif_scale_vdev *scale_vdev,
902 				      enum v4l2_buf_type buf_type)
903 {
904 	struct rkcif_hw *hw_dev = scale_vdev->cifdev->hw_dev;
905 
906 	q->type = buf_type;
907 	q->io_modes = VB2_MMAP | VB2_DMABUF;
908 	q->drv_priv = scale_vdev;
909 	q->ops = &rkcif_scale_vb2_ops;
910 	q->mem_ops = hw_dev->mem_ops;
911 	q->buf_struct_size = sizeof(struct rkcif_buffer);
912 	q->min_buffers_needed = CIF_SCALE_REQ_BUFS_MIN;
913 	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
914 	q->lock = &scale_vdev->vnode.vlock;
915 	q->dev = hw_dev->dev;
916 	q->allow_cache_hints = 1;
917 	q->bidirectional = 1;
918 	q->gfp_flags = GFP_DMA32;
919 	if (hw_dev->is_dma_contig)
920 		q->dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
921 	return vb2_queue_init(q);
922 }
923 
924 
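/*
 * Translate the global interrupt status into the scaler channel whose
 * frame-end bit is set, lowest channel first. A warning is printed when
 * both frame0 and frame1 bits of a channel are set at once. Returns
 * -EINVAL when no scale frame-end bit is pending.
 */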
925 static int rkcif_scale_g_ch(struct v4l2_device *v4l2_dev,
926 			    unsigned int intstat)
927 {
928 	if (intstat & SCALE_END_INTSTAT(0)) {
929 		if ((intstat & SCALE_END_INTSTAT(0)) ==
930 		    SCALE_END_INTSTAT(0))
931 			v4l2_warn(v4l2_dev, "frame0/1 trigger simultaneously in CH0\n");
932 		return RKCIF_SCALE_CH0;
933 	}
934 
935 	if (intstat & SCALE_END_INTSTAT(1)) {
936 		if ((intstat & SCALE_END_INTSTAT(1)) ==
937 		    SCALE_END_INTSTAT(1))
938 			v4l2_warn(v4l2_dev, "frame0/1 trigger simultaneously in CH1\n");
939 		return RKCIF_SCALE_CH1;
940 	}
941 
942 	if (intstat & SCALE_END_INTSTAT(2)) {
943 		if ((intstat & SCALE_END_INTSTAT(2)) ==
944 		    SCALE_END_INTSTAT(2))
945 			v4l2_warn(v4l2_dev, "frame0/1 trigger simultaneously in CH2\n");
946 		return RKCIF_SCALE_CH2;
947 	}
948 
949 	if (intstat & SCALE_END_INTSTAT(3)) {
950 		if ((intstat & SCALE_END_INTSTAT(3)) ==
951 		    SCALE_END_INTSTAT(3))
952 			v4l2_warn(v4l2_dev, "frame0/1 trigger simultaneously in CH3\n");
953 		return RKCIF_SCALE_CH3;
954 	}
955 
956 	return -EINVAL;
957 }
958 
959 static void rkcif_scale_vb_done_oneframe(struct rkcif_scale_vdev *scale_vdev,
960 					 struct vb2_v4l2_buffer *vb_done)
961 {
962 	const struct cif_output_fmt *fmt = scale_vdev->scale_out_fmt;
963 	u32 i;
964 
965 	/* Dequeue a filled buffer */
966 	for (i = 0; i < fmt->mplanes; i++) {
967 		vb2_set_plane_payload(&vb_done->vb2_buf, i,
968 				      scale_vdev->pixm.plane_fmt[i].sizeimage);
969 	}
970 
971 	vb_done->vb2_buf.timestamp = ktime_get_ns();
972 
973 	vb2_buffer_done(&vb_done->vb2_buf, VB2_BUF_STATE_DONE);
974 }
975 
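/*
 * Per-frame bookkeeping from the ISR: hand the buffer of the completed
 * ping-pong half back to userspace (only if a replacement buffer could be
 * programmed) and tag it with the parent stream's frame index.
 */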
976 static void rkcif_scale_update_stream(struct rkcif_scale_vdev *scale_vdev, int ch)
977 {
978 	struct rkcif_buffer *active_buf = NULL;
979 	struct vb2_v4l2_buffer *vb_done = NULL;
980 	int ret = 0;
981 
982 	if (scale_vdev->frame_phase & CIF_CSI_FRAME0_READY) {
983 		if (scale_vdev->curr_buf)
984 			active_buf = scale_vdev->curr_buf;
985 	} else if (scale_vdev->frame_phase & CIF_CSI_FRAME1_READY) {
986 		if (scale_vdev->next_buf)
987 			active_buf = scale_vdev->next_buf;
988 	}
989 
990 	ret = rkcif_assign_scale_buffer_pingpong(scale_vdev,
991 					 RKCIF_YUV_ADDR_STATE_UPDATE,
992 					 ch);
993 
994 	scale_vdev->frame_idx = scale_vdev->stream->frame_idx;
995 	if (active_buf && (!ret)) {
996 		vb_done = &active_buf->vb;
997 		vb_done->vb2_buf.timestamp = ktime_get_ns();
998 		vb_done->sequence = scale_vdev->frame_idx;
999 		rkcif_scale_vb_done_oneframe(scale_vdev, vb_done);
1000 	}
1001 }
1002 
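/*
 * Scale part of the CIF interrupt handler: log an error and return if any
 * channel reports a FIFO overflow, otherwise service every channel whose
 * frame-end bit is set, handling a pending stop request, rotating the
 * ping-pong buffers and, if the parent stream asked for it, enabling DMA
 * capture.
 */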
1003 void rkcif_irq_handle_scale(struct rkcif_device *cif_dev, unsigned int intstat_glb)
1004 {
1005 	struct rkcif_scale_vdev *scale_vdev;
1006 	struct rkcif_stream *stream;
1007 	int ch;
1008 	int i = 0;
1009 	u32 val = 0;
1010 
1011 	val = SCALE_FIFO_OVERFLOW(0) |
1012 	      SCALE_FIFO_OVERFLOW(1) |
1013 	      SCALE_FIFO_OVERFLOW(2) |
1014 	      SCALE_FIFO_OVERFLOW(3);
1015 	if (intstat_glb & val) {
1016 		v4l2_err(&cif_dev->v4l2_dev,
1017 			"ERROR: scale channel, overflow intstat_glb:0x%x !!\n",
1018 			intstat_glb);
1019 		return;
1020 	}
1021 
1022 	ch = rkcif_scale_g_ch(&cif_dev->v4l2_dev,
1023 				      intstat_glb);
1024 	if (ch < 0)
1025 		return;
1026 
1027 	for (i = 0; i < RKCIF_MAX_STREAM_MIPI; i++) {
1028 		ch = rkcif_scale_g_ch(&cif_dev->v4l2_dev,
1029 				      intstat_glb);
1030 		if (ch < 0)
1031 			continue;
1032 
1033 		scale_vdev = &cif_dev->scale_vdev[ch];
1034 
1035 		if (scale_vdev->state != RKCIF_STATE_STREAMING)
1036 			continue;
1037 
1038 		if (scale_vdev->stopping) {
1039 			rkcif_scale_stop(scale_vdev);
1040 			scale_vdev->stopping = false;
1041 			wake_up(&scale_vdev->wq_stopped);
1042 			continue;
1043 		}
1044 
1045 		scale_vdev->frame_phase = SW_SCALE_END(intstat_glb, ch);
1046 		intstat_glb &= ~(SCALE_END_INTSTAT(ch));
1047 		rkcif_scale_update_stream(scale_vdev, ch);
1048 		stream = scale_vdev->stream;
1049 		if (stream->to_en_dma)
1050 			rkcif_enable_dma_capture(stream, false);
1051 	}
1052 }
1053 
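/*
 * Reset the per-channel scale vdev state (buffer list, lock, wait queue,
 * BLC values), wire it to its parent stream and apply a default SBGGR16
 * format at the default resolution via rkcif_scale_set_fmt().
 */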
1054 void rkcif_init_scale_vdev(struct rkcif_device *cif_dev, u32 ch)
1055 {
1056 	struct rkcif_scale_vdev *scale_vdev = &cif_dev->scale_vdev[ch];
1057 	struct rkcif_stream *stream = &cif_dev->stream[ch];
1058 	struct v4l2_pix_format_mplane pixm;
1059 
1060 	memset(scale_vdev, 0, sizeof(*scale_vdev));
1061 	memset(&pixm, 0, sizeof(pixm));
1062 	scale_vdev->cifdev = cif_dev;
1063 	scale_vdev->stream = stream;
1064 	stream->scale_vdev = scale_vdev;
1065 	scale_vdev->ch = ch;
1066 	scale_vdev->ch_src = 0;
1067 	scale_vdev->frame_idx = 0;
1068 	pixm.pixelformat = V4L2_PIX_FMT_SBGGR16;
1069 	pixm.width = RKCIF_DEFAULT_WIDTH;
1070 	pixm.height = RKCIF_DEFAULT_HEIGHT;
1071 	scale_vdev->state = RKCIF_STATE_READY;
1072 	scale_vdev->stopping = false;
1073 	scale_vdev->blc.pattern00 = 0;
1074 	scale_vdev->blc.pattern01 = 0;
1075 	scale_vdev->blc.pattern02 = 0;
1076 	scale_vdev->blc.pattern03 = 0;
1077 	INIT_LIST_HEAD(&scale_vdev->buf_head);
1078 	spin_lock_init(&scale_vdev->vbq_lock);
1079 	init_waitqueue_head(&scale_vdev->wq_stopped);
1080 	rkcif_scale_set_fmt(scale_vdev, &pixm, false);
1081 }
1082 
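/*
 * Create the video node for one scale channel: pick its name from the
 * channel id, set up the ioctl/file ops, the vb2 queue and a single sink
 * media pad, then register it as a V4L2 multi-planar capture device.
 */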
1083 static int rkcif_register_scale_vdev(struct rkcif_scale_vdev *scale_vdev, bool is_multi_input)
1084 {
1085 	int ret = 0;
1086 	struct video_device *vdev = &scale_vdev->vnode.vdev;
1087 	struct rkcif_vdev_node *node;
1088 	char *vdev_name;
1089 
1090 	switch (scale_vdev->ch) {
1091 	case RKCIF_SCALE_CH0:
1092 		vdev_name = CIF_SCALE_CH0_VDEV_NAME;
1093 		break;
1094 	case RKCIF_SCALE_CH1:
1095 		vdev_name = CIF_SCALE_CH1_VDEV_NAME;
1096 		break;
1097 	case RKCIF_SCALE_CH2:
1098 		vdev_name = CIF_SCALE_CH2_VDEV_NAME;
1099 		break;
1100 	case RKCIF_SCALE_CH3:
1101 		vdev_name = CIF_SCALE_CH3_VDEV_NAME;
1102 		break;
1103 	default:
1104 		ret = -EINVAL;
1105 		v4l2_err(&scale_vdev->cifdev->v4l2_dev, "Invalid scale channel\n");
1106 		return ret;
1107 	}
1108 
1109 	strscpy(vdev->name, vdev_name, sizeof(vdev->name));
1110 	node = container_of(vdev, struct rkcif_vdev_node, vdev);
1111 	mutex_init(&node->vlock);
1112 
1113 	vdev->ioctl_ops = &rkcif_scale_ioctl;
1114 	vdev->fops = &rkcif_scale_fops;
1115 	vdev->release = video_device_release_empty;
1116 	vdev->lock = &node->vlock;
1117 	vdev->v4l2_dev = &scale_vdev->cifdev->v4l2_dev;
1118 	vdev->queue = &node->buf_queue;
1119 	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
1120 			    V4L2_CAP_STREAMING;
1121 	vdev->vfl_dir =  VFL_DIR_RX;
1122 	node->pad.flags = MEDIA_PAD_FL_SINK;
1123 	video_set_drvdata(vdev, scale_vdev);
1124 
1125 	rkcif_scale_init_vb2_queue(&node->buf_queue,
1126 				   scale_vdev,
1127 				   V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
1128 	vdev->queue = &node->buf_queue;
1129 
1130 	ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
1131 	if (ret < 0)
1132 		goto err_release_queue;
1133 
1134 	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
1135 	if (ret < 0) {
1136 		dev_err(&vdev->dev,
1137 			"could not register Video for Linux device\n");
1138 		goto err_cleanup_media_entity;
1139 	}
1140 	return 0;
1141 
1142 err_cleanup_media_entity:
1143 	media_entity_cleanup(&vdev->entity);
1144 err_release_queue:
1145 	vb2_queue_release(vdev->queue);
1146 	return ret;
1147 }
1148 
1149 static void rkcif_unregister_scale_vdev(struct rkcif_scale_vdev *scale_vdev)
1150 {
1151 	struct rkcif_vdev_node *node = &scale_vdev->vnode;
1152 	struct video_device *vdev = &node->vdev;
1153 
1154 	video_unregister_device(vdev);
1155 	media_entity_cleanup(&vdev->entity);
1156 	vb2_queue_release(vdev->queue);
1157 }
1158 
1159 int rkcif_register_scale_vdevs(struct rkcif_device *cif_dev,
1160 			       int stream_num,
1161 			       bool is_multi_input)
1162 {
1163 	struct rkcif_scale_vdev *scale_vdev;
1164 	int i, j, ret;
1165 
1166 	for (i = 0; i < stream_num; i++) {
1167 		scale_vdev = &cif_dev->scale_vdev[i];
1168 		ret = rkcif_register_scale_vdev(scale_vdev, is_multi_input);
1169 		if (ret < 0)
1170 			goto err;
1171 	}
1172 
1173 	return 0;
1174 err:
1175 	for (j = 0; j < i; j++) {
1176 		scale_vdev = &cif_dev->scale_vdev[j];
1177 		rkcif_unregister_scale_vdev(scale_vdev);
1178 	}
1179 
1180 	return ret;
1181 }
1182 
1183 void rkcif_unregister_scale_vdevs(struct rkcif_device *cif_dev,
1184 				  int stream_num)
1185 {
1186 	struct rkcif_scale_vdev *scale_vdev;
1187 	int i;
1188 
1189 	for (i = 0; i < stream_num; i++) {
1190 		scale_vdev = &cif_dev->scale_vdev[i];
1191 		rkcif_unregister_scale_vdev(scale_vdev);
1192 	}
1193 }
1194 
1195