// SPDX-License-Identifier: GPL-2.0
/*
 * Rockchip CIF Driver
 *
 * Copyright (C) 2020 Rockchip Electronics Co., Ltd.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/reset.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regmap.h>
#include <media/videobuf2-dma-contig.h>
#include <media/v4l2-fwnode.h>
#include "dev.h"
#include <linux/regulator/consumer.h>
#include <linux/rk-camera-module.h>
#include "common.h"

static inline struct sditf_priv *to_sditf_priv(struct v4l2_subdev *subdev)
{
	return container_of(subdev, struct sditf_priv, sd);
}

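/*
 * Deferred work that drains priv->buf_free_list and returns each
 * rkcif_rx_buffer to the reserved memory pool.
 */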
static void sditf_buffree_work(struct work_struct *work)
{
	struct sditf_work_struct *buffree_work = container_of(work,
							       struct sditf_work_struct,
							       work);
	struct sditf_priv *priv = container_of(buffree_work,
					       struct sditf_priv,
					       buffree_work);
	struct rkcif_rx_buffer *rx_buf = NULL;
	unsigned long flags;
	LIST_HEAD(local_list);

	spin_lock_irqsave(&priv->cif_dev->buffree_lock, flags);
	list_replace_init(&priv->buf_free_list, &local_list);
	while (!list_empty(&local_list)) {
		rx_buf = list_first_entry(&local_list,
					  struct rkcif_rx_buffer, list_free);
		if (rx_buf) {
			list_del(&rx_buf->list_free);
			rkcif_free_reserved_mem_buf(priv->cif_dev, rx_buf);
		}
	}
	spin_unlock_irqrestore(&priv->cif_dev->buffree_lock, flags);
}

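/*
 * Query the terminal sensor for its HDR configuration; fall back to
 * NO_HDR when no sensor is bound or the ioctl is not supported.
 */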
static void sditf_get_hdr_mode(struct sditf_priv *priv)
{
	struct rkcif_device *cif_dev = priv->cif_dev;
	struct rkmodule_hdr_cfg hdr_cfg;
	int ret = 0;

	if (!cif_dev->terminal_sensor.sd)
		rkcif_update_sensor_info(&cif_dev->stream[0]);

	if (cif_dev->terminal_sensor.sd) {
		ret = v4l2_subdev_call(cif_dev->terminal_sensor.sd,
				       core, ioctl,
				       RKMODULE_GET_HDR_CFG,
				       &hdr_cfg);
		if (!ret)
			priv->hdr_cfg = hdr_cfg;
		else
			priv->hdr_cfg.hdr_mode = NO_HDR;
	} else {
		priv->hdr_cfg.hdr_mode = NO_HDR;
	}
}

static int sditf_g_frame_interval(struct v4l2_subdev *sd,
				  struct v4l2_subdev_frame_interval *fi)
{
	struct sditf_priv *priv = to_sditf_priv(sd);
	struct rkcif_device *cif_dev = priv->cif_dev;
	struct v4l2_subdev *sensor_sd;

	if (!cif_dev->terminal_sensor.sd)
		rkcif_update_sensor_info(&cif_dev->stream[0]);

	if (cif_dev->terminal_sensor.sd) {
		sensor_sd = cif_dev->terminal_sensor.sd;
		return v4l2_subdev_call(sensor_sd, video, g_frame_interval, fi);
	}

	return -EINVAL;
}

static int sditf_g_mbus_config(struct v4l2_subdev *sd, unsigned int pad_id,
			       struct v4l2_mbus_config *config)
{
	struct sditf_priv *priv = to_sditf_priv(sd);
	struct rkcif_device *cif_dev = priv->cif_dev;
	struct v4l2_subdev *sensor_sd;

	if (!cif_dev->active_sensor)
		rkcif_update_sensor_info(&cif_dev->stream[0]);

	if (cif_dev->active_sensor) {
		sensor_sd = cif_dev->active_sensor->sd;
		return v4l2_subdev_call(sensor_sd, pad, get_mbus_config, 0, config);
	} else {
		config->type = V4L2_MBUS_CSI2_DPHY;
		config->flags = V4L2_MBUS_CSI2_CHANNEL_0 |
				V4L2_MBUS_CSI2_CONTINUOUS_CLOCK;
		return 0;
	}

	return -EINVAL;
}

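/*
 * Fetch the active format (and crop bounds, if any) from the sensor,
 * cache it as the capture size and configure the CIF streams that the
 * current HDR mode needs. Used as both the pad get_fmt and set_fmt handler.
 */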
static int sditf_get_set_fmt(struct v4l2_subdev *sd,
			     struct v4l2_subdev_pad_config *cfg,
			     struct v4l2_subdev_format *fmt)
{
	struct sditf_priv *priv = to_sditf_priv(sd);
	struct rkcif_device *cif_dev = priv->cif_dev;
	struct v4l2_subdev_selection input_sel;
	struct v4l2_pix_format_mplane pixm;
	const struct cif_output_fmt *out_fmt;
	int ret = -EINVAL;
	bool is_uncompact = false;

	if (!cif_dev->terminal_sensor.sd)
		rkcif_update_sensor_info(&cif_dev->stream[0]);

	if (cif_dev->terminal_sensor.sd) {
		sditf_get_hdr_mode(priv);
		fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
		fmt->pad = 0;
		ret = v4l2_subdev_call(cif_dev->terminal_sensor.sd, pad, get_fmt, NULL, fmt);
		if (ret) {
			v4l2_err(&priv->sd,
				 "%s: get sensor format failed\n", __func__);
			return ret;
		}

		input_sel.target = V4L2_SEL_TGT_CROP_BOUNDS;
		input_sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		input_sel.pad = 0;
		ret = v4l2_subdev_call(cif_dev->terminal_sensor.sd,
				       pad, get_selection, NULL,
				       &input_sel);
		if (!ret) {
			fmt->format.width = input_sel.r.width;
			fmt->format.height = input_sel.r.height;
		}
		priv->cap_info.width = fmt->format.width;
		priv->cap_info.height = fmt->format.height;
		pixm.pixelformat = rkcif_mbus_pixelcode_to_v4l2(fmt->format.code);
		pixm.width = priv->cap_info.width;
		pixm.height = priv->cap_info.height;

		out_fmt = rkcif_find_output_fmt(NULL, pixm.pixelformat);
		if (priv->toisp_inf.link_mode == TOISP_UNITE &&
		    ((pixm.width / 2 - RKMOUDLE_UNITE_EXTEND_PIXEL) * out_fmt->raw_bpp / 8) & 0xf)
			is_uncompact = true;

		v4l2_dbg(1, rkcif_debug, &cif_dev->v4l2_dev,
			 "%s, width %d, height %d, hdr mode %d\n",
			 __func__, fmt->format.width, fmt->format.height, priv->hdr_cfg.hdr_mode);
		if (priv->hdr_cfg.hdr_mode == NO_HDR ||
		    priv->hdr_cfg.hdr_mode == HDR_COMPR) {
			rkcif_set_fmt(&cif_dev->stream[0], &pixm, false);
		} else if (priv->hdr_cfg.hdr_mode == HDR_X2) {
			if (priv->mode.rdbk_mode == RKISP_VICAP_ONLINE &&
			    priv->toisp_inf.link_mode == TOISP_UNITE) {
				if (is_uncompact) {
					cif_dev->stream[0].is_compact = false;
					cif_dev->stream[0].is_high_align = true;
				} else {
					cif_dev->stream[0].is_compact = true;
				}
			}
			rkcif_set_fmt(&cif_dev->stream[0], &pixm, false);
			rkcif_set_fmt(&cif_dev->stream[1], &pixm, false);
		} else if (priv->hdr_cfg.hdr_mode == HDR_X3) {
			if (priv->mode.rdbk_mode == RKISP_VICAP_ONLINE &&
			    priv->toisp_inf.link_mode == TOISP_UNITE) {
				if (is_uncompact) {
					cif_dev->stream[0].is_compact = false;
					cif_dev->stream[0].is_high_align = true;
					cif_dev->stream[1].is_compact = false;
					cif_dev->stream[1].is_high_align = true;
				} else {
					cif_dev->stream[0].is_compact = true;
					cif_dev->stream[1].is_compact = true;
				}
			}
			rkcif_set_fmt(&cif_dev->stream[0], &pixm, false);
			rkcif_set_fmt(&cif_dev->stream[1], &pixm, false);
			rkcif_set_fmt(&cif_dev->stream[2], &pixm, false);
		}
	} else {
		if (priv->sensor_sd) {
			fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
			fmt->pad = 0;
			ret = v4l2_subdev_call(priv->sensor_sd, pad, get_fmt, NULL, fmt);
			if (ret) {
				v4l2_err(&priv->sd,
					 "%s: get sensor format failed\n", __func__);
				return ret;
			}

			input_sel.target = V4L2_SEL_TGT_CROP_BOUNDS;
			input_sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
			input_sel.pad = 0;
			ret = v4l2_subdev_call(priv->sensor_sd,
					       pad, get_selection, NULL,
					       &input_sel);
			if (!ret) {
				fmt->format.width = input_sel.r.width;
				fmt->format.height = input_sel.r.height;
			}
			priv->cap_info.width = fmt->format.width;
			priv->cap_info.height = fmt->format.height;
			pixm.pixelformat = rkcif_mbus_pixelcode_to_v4l2(fmt->format.code);
			pixm.width = priv->cap_info.width;
			pixm.height = priv->cap_info.height;
		} else {
			fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
			fmt->pad = 0;
			fmt->format.code = MEDIA_BUS_FMT_SBGGR10_1X10;
			fmt->format.width = 640;
			fmt->format.height = 480;
		}
	}

	return 0;
}

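/*
 * Allocate rx buffers for every stream used by the current HDR mode.
 * In thunderboot mode the reserved memory region is split evenly
 * between the streams before each allocation.
 */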
static int sditf_init_buf(struct sditf_priv *priv)
{
	struct rkcif_device *cif_dev = priv->cif_dev;
	int ret = 0;

	if (priv->hdr_cfg.hdr_mode == HDR_X2) {
		if (priv->mode.rdbk_mode == RKISP_VICAP_RDBK_AUTO) {
			if (cif_dev->is_thunderboot)
				cif_dev->resmem_size /= 2;
			ret = rkcif_init_rx_buf(&cif_dev->stream[0], priv->buf_num);
			if (cif_dev->is_thunderboot)
				cif_dev->resmem_pa += cif_dev->resmem_size;
			ret |= rkcif_init_rx_buf(&cif_dev->stream[1], priv->buf_num);
		} else {
			ret = rkcif_init_rx_buf(&cif_dev->stream[0], priv->buf_num);
		}
	} else if (priv->hdr_cfg.hdr_mode == HDR_X3) {
		if (priv->mode.rdbk_mode == RKISP_VICAP_RDBK_AUTO) {
			if (cif_dev->is_thunderboot)
				cif_dev->resmem_size /= 3;
			ret = rkcif_init_rx_buf(&cif_dev->stream[0], priv->buf_num);
			if (cif_dev->is_thunderboot)
				cif_dev->resmem_pa += cif_dev->resmem_size;
			ret |= rkcif_init_rx_buf(&cif_dev->stream[1], priv->buf_num);
			if (cif_dev->is_thunderboot)
				cif_dev->resmem_pa += cif_dev->resmem_size;
			ret |= rkcif_init_rx_buf(&cif_dev->stream[2], priv->buf_num);
		} else {
			ret = rkcif_init_rx_buf(&cif_dev->stream[0], priv->buf_num);
			ret |= rkcif_init_rx_buf(&cif_dev->stream[1], priv->buf_num);
		}
	} else {
		if (priv->mode.rdbk_mode == RKISP_VICAP_RDBK_AUTO)
			ret = rkcif_init_rx_buf(&cif_dev->stream[0], priv->buf_num);
		else
			ret = -EINVAL;
	}
	return ret;
}

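/*
 * Release the rx buffers of every stream used by the current HDR mode
 * and, when leaving thunderboot mode, reset the wait-line state.
 */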
static void sditf_free_buf(struct sditf_priv *priv)
{
	struct rkcif_device *cif_dev = priv->cif_dev;

	if (priv->hdr_cfg.hdr_mode == HDR_X2) {
		rkcif_free_rx_buf(&cif_dev->stream[0], priv->buf_num);
		rkcif_free_rx_buf(&cif_dev->stream[1], priv->buf_num);
	} else if (priv->hdr_cfg.hdr_mode == HDR_X3) {
		rkcif_free_rx_buf(&cif_dev->stream[0], priv->buf_num);
		rkcif_free_rx_buf(&cif_dev->stream[1], priv->buf_num);
		rkcif_free_rx_buf(&cif_dev->stream[2], priv->buf_num);
	} else {
		rkcif_free_rx_buf(&cif_dev->stream[0], priv->buf_num);
	}
	if (cif_dev->is_thunderboot) {
		cif_dev->wait_line_cache = 0;
		cif_dev->wait_line = 0;
		cif_dev->wait_line_bak = 0;
		cif_dev->is_thunderboot = false;
	}
}

static int sditf_get_selection(struct v4l2_subdev *sd,
			       struct v4l2_subdev_pad_config *cfg,
			       struct v4l2_subdev_selection *sel)
{
	return -EINVAL;
}

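/*
 * Derive the ISP link mode (TOISP0/TOISP1/TOISP_UNITE or none) from the
 * readback mode and the ISP device name passed in by the ISP driver.
 */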
static void sditf_reinit_mode(struct sditf_priv *priv, struct rkisp_vicap_mode *mode)
{
	if (mode->rdbk_mode == RKISP_VICAP_RDBK_AIQ) {
		priv->toisp_inf.link_mode = TOISP_NONE;
	} else {
		if (strstr(mode->name, RKISP0_DEVNAME))
			priv->toisp_inf.link_mode = TOISP0;
		else if (strstr(mode->name, RKISP1_DEVNAME))
			priv->toisp_inf.link_mode = TOISP1;
		else if (strstr(mode->name, RKISP_UNITE_DEVNAME))
			priv->toisp_inf.link_mode = TOISP_UNITE;
		else
			priv->toisp_inf.link_mode = TOISP0;
	}

	v4l2_dbg(1, rkcif_debug, &priv->cif_dev->v4l2_dev,
		 "%s, mode->rdbk_mode %d, mode->name %s, link_mode %d\n",
		 __func__, mode->rdbk_mode, mode->name, priv->toisp_inf.link_mode);
}

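/*
 * Private ioctls from the ISP driver: set the VICAP working mode,
 * trigger rx buffer allocation, or forward HDR queries to the sensor.
 */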
static long sditf_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
	struct sditf_priv *priv = to_sditf_priv(sd);
	struct rkisp_vicap_mode *mode;
	struct v4l2_subdev_format fmt;
	struct rkcif_device *cif_dev = priv->cif_dev;
	struct v4l2_subdev *sensor_sd;
	int *pbuf_num = NULL;
	int ret = 0;

	switch (cmd) {
	case RKISP_VICAP_CMD_MODE:
		mode = (struct rkisp_vicap_mode *)arg;
		memcpy(&priv->mode, mode, sizeof(*mode));
		sditf_reinit_mode(priv, &priv->mode);
		if (priv->is_combine_mode)
			mode->input.merge_num = cif_dev->sditf_cnt;
		else
			mode->input.merge_num = 1;
		mode->input.index = priv->combine_index;
		return 0;
	case RKISP_VICAP_CMD_INIT_BUF:
		pbuf_num = (int *)arg;
		priv->buf_num = *pbuf_num;
		sditf_get_set_fmt(&priv->sd, NULL, &fmt);
		ret = sditf_init_buf(priv);
		return ret;
	case RKMODULE_GET_HDR_CFG:
		if (!cif_dev->terminal_sensor.sd)
			rkcif_update_sensor_info(&cif_dev->stream[0]);

		if (cif_dev->terminal_sensor.sd) {
			sensor_sd = cif_dev->terminal_sensor.sd;
			return v4l2_subdev_call(sensor_sd, core, ioctl, cmd, arg);
		}
		break;
	default:
		break;
	}

	return -EINVAL;
}

#ifdef CONFIG_COMPAT
static long sditf_compat_ioctl32(struct v4l2_subdev *sd,
				 unsigned int cmd, unsigned long arg)
{
	void __user *up = compat_ptr(arg);
	struct sditf_priv *priv = to_sditf_priv(sd);
	struct rkcif_device *cif_dev = priv->cif_dev;
	struct v4l2_subdev *sensor_sd;
	struct rkisp_vicap_mode *mode;
	struct rkmodule_hdr_cfg *hdr_cfg;
	int buf_num;
	int ret = 0;

	switch (cmd) {
	case RKISP_VICAP_CMD_MODE:
		mode = kzalloc(sizeof(*mode), GFP_KERNEL);
		if (!mode) {
			ret = -ENOMEM;
			return ret;
		}
		if (copy_from_user(mode, up, sizeof(*mode))) {
			kfree(mode);
			return -EFAULT;
		}
		ret = sditf_ioctl(sd, cmd, mode);
		kfree(mode);
		return ret;
	case RKISP_VICAP_CMD_INIT_BUF:
		if (copy_from_user(&buf_num, up, sizeof(int)))
			return -EFAULT;
		ret = sditf_ioctl(sd, cmd, &buf_num);
		return ret;
	case RKMODULE_GET_HDR_CFG:
		hdr_cfg = kzalloc(sizeof(*hdr_cfg), GFP_KERNEL);
		if (!hdr_cfg) {
			ret = -ENOMEM;
			return ret;
		}
		if (copy_from_user(hdr_cfg, up, sizeof(*hdr_cfg))) {
			kfree(hdr_cfg);
			return -EFAULT;
		}
		ret = sditf_ioctl(sd, cmd, hdr_cfg);
		return ret;
	default:
		break;
	}

	if (!cif_dev->terminal_sensor.sd)
		rkcif_update_sensor_info(&cif_dev->stream[0]);

	if (cif_dev->terminal_sensor.sd) {
		sensor_sd = cif_dev->terminal_sensor.sd;
		return v4l2_subdev_call(sensor_sd, core, compat_ioctl32, cmd, arg);
	}

	return -EINVAL;
}
#endif

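/*
 * Program the CIF-to-ISP crop, size and control registers and enable
 * the frame-start interrupts for the channels required by the current
 * HDR mode. @user selects TOISP0 or TOISP1; in unite mode each ISP
 * receives half of the frame plus RKMOUDLE_UNITE_EXTEND_PIXEL pixels.
 */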
static int sditf_channel_enable(struct sditf_priv *priv, int user)
{
	struct rkcif_device *cif_dev = priv->cif_dev;
	struct rkmodule_capture_info *capture_info = &cif_dev->channels[0].capture_info;
	unsigned int ch0 = 0, ch1 = 0, ch2 = 0;
	unsigned int ctrl_val = 0;
	unsigned int int_en = 0;
	unsigned int offset_x = 0;
	unsigned int offset_y = 0;
	unsigned int width = priv->cap_info.width;
	unsigned int height = priv->cap_info.height;
	int csi_idx = cif_dev->csi_host_idx;

	if (capture_info->mode == RKMODULE_MULTI_DEV_COMBINE_ONE &&
	    priv->toisp_inf.link_mode == TOISP_UNITE) {
		if (capture_info->multi_dev.dev_num != 2 ||
		    capture_info->multi_dev.pixel_offset != RKMOUDLE_UNITE_EXTEND_PIXEL) {
			v4l2_err(&cif_dev->v4l2_dev,
				 "param error of online mode, combine dev num %d, offset %d\n",
				 capture_info->multi_dev.dev_num,
				 capture_info->multi_dev.pixel_offset);
			return -EINVAL;
		}
		csi_idx = capture_info->multi_dev.dev_idx[user];
	}

	if (priv->hdr_cfg.hdr_mode == NO_HDR ||
	    priv->hdr_cfg.hdr_mode == HDR_COMPR) {
		if (cif_dev->inf_id == RKCIF_MIPI_LVDS)
			ch0 = csi_idx * 4;
		else
			ch0 = 24; /* dvp */
		ctrl_val = (ch0 << 3) | 0x1;
		if (user == 0)
			int_en = CIF_TOISP0_FS(0);
		else
			int_en = CIF_TOISP1_FS(0);
		priv->toisp_inf.ch_info[0].is_valid = true;
		priv->toisp_inf.ch_info[0].id = ch0;
	} else if (priv->hdr_cfg.hdr_mode == HDR_X2) {
		ch0 = cif_dev->csi_host_idx * 4 + 1;
		ch1 = cif_dev->csi_host_idx * 4;
		ctrl_val = (ch0 << 3) | 0x1;
		ctrl_val |= (ch1 << 11) | 0x100;
		if (user == 0)
			int_en = CIF_TOISP0_FS(0) | CIF_TOISP0_FS(1);
		else
			int_en = CIF_TOISP1_FS(0) | CIF_TOISP1_FS(1);
		priv->toisp_inf.ch_info[0].is_valid = true;
		priv->toisp_inf.ch_info[0].id = ch0;
		priv->toisp_inf.ch_info[1].is_valid = true;
		priv->toisp_inf.ch_info[1].id = ch1;
	} else if (priv->hdr_cfg.hdr_mode == HDR_X3) {
		ch0 = cif_dev->csi_host_idx * 4 + 2;
		ch1 = cif_dev->csi_host_idx * 4 + 1;
		ch2 = cif_dev->csi_host_idx * 4;
		ctrl_val = (ch0 << 3) | 0x1;
		ctrl_val |= (ch1 << 11) | 0x100;
		ctrl_val |= (ch2 << 19) | 0x10000;
		if (user == 0)
			int_en = CIF_TOISP0_FS(0) | CIF_TOISP0_FS(1) | CIF_TOISP0_FS(2);
		else
			int_en = CIF_TOISP1_FS(0) | CIF_TOISP1_FS(1) | CIF_TOISP1_FS(2);
		priv->toisp_inf.ch_info[0].is_valid = true;
		priv->toisp_inf.ch_info[0].id = ch0;
		priv->toisp_inf.ch_info[1].is_valid = true;
		priv->toisp_inf.ch_info[1].id = ch1;
		priv->toisp_inf.ch_info[2].is_valid = true;
		priv->toisp_inf.ch_info[2].id = ch2;
	}
	if (user == 0) {
		if (priv->toisp_inf.link_mode == TOISP_UNITE)
			width = priv->cap_info.width / 2 + RKMOUDLE_UNITE_EXTEND_PIXEL;
		rkcif_write_register(cif_dev, CIF_REG_TOISP0_CTRL, ctrl_val);
		if (width && height) {
			rkcif_write_register(cif_dev, CIF_REG_TOISP0_CROP,
					     offset_x | (offset_y << 16));
			rkcif_write_register(cif_dev, CIF_REG_TOISP0_SIZE,
					     width | (height << 16));
		} else {
			return -EINVAL;
		}
	} else {
		if (priv->toisp_inf.link_mode == TOISP_UNITE) {
			if (capture_info->mode == RKMODULE_MULTI_DEV_COMBINE_ONE)
				offset_x = 0;
			else
				offset_x = priv->cap_info.width / 2 - RKMOUDLE_UNITE_EXTEND_PIXEL;
			width = priv->cap_info.width / 2 + RKMOUDLE_UNITE_EXTEND_PIXEL;
		}
		rkcif_write_register(cif_dev, CIF_REG_TOISP1_CTRL, ctrl_val);
		if (width && height) {
			rkcif_write_register(cif_dev, CIF_REG_TOISP1_CROP,
					     offset_x | (offset_y << 16));
			rkcif_write_register(cif_dev, CIF_REG_TOISP1_SIZE,
					     width | (height << 16));
		} else {
			return -EINVAL;
		}
	}
#if IS_ENABLED(CONFIG_CPU_RV1106)
	rv1106_sdmmc_get_lock();
#endif
	rkcif_write_register_or(cif_dev, CIF_REG_GLB_INTEN, int_en);
#if IS_ENABLED(CONFIG_CPU_RV1106)
	rv1106_sdmmc_put_lock();
#endif
	return 0;
}

static void sditf_channel_disable(struct sditf_priv *priv, int user)
{
	struct rkcif_device *cif_dev = priv->cif_dev;
	unsigned int ctrl_val = 0;

	if (priv->hdr_cfg.hdr_mode == NO_HDR ||
	    priv->hdr_cfg.hdr_mode == HDR_COMPR) {
		if (user == 0)
			ctrl_val = CIF_TOISP0_FE(0);
		else
			ctrl_val = CIF_TOISP1_FE(0);
	} else if (priv->hdr_cfg.hdr_mode == HDR_X2) {
		if (user == 0)
			ctrl_val = CIF_TOISP0_FE(0) | CIF_TOISP0_FE(1);
		else
			ctrl_val = CIF_TOISP1_FE(0) | CIF_TOISP1_FE(1);
	} else if (priv->hdr_cfg.hdr_mode == HDR_X3) {
		if (user == 0)
			ctrl_val = CIF_TOISP0_FE(0) | CIF_TOISP0_FE(1) | CIF_TOISP0_FE(2);
		else
			ctrl_val = CIF_TOISP1_FE(0) | CIF_TOISP1_FE(1) | CIF_TOISP1_FE(2);
	}
#if IS_ENABLED(CONFIG_CPU_RV1106)
	rv1106_sdmmc_get_lock();
#endif
	rkcif_write_register_or(cif_dev, CIF_REG_GLB_INTEN, ctrl_val);
#if IS_ENABLED(CONFIG_CPU_RV1106)
	rv1106_sdmmc_put_lock();
#endif
	priv->toisp_inf.ch_info[0].is_valid = false;
	priv->toisp_inf.ch_info[1].is_valid = false;
	priv->toisp_inf.ch_info[2].is_valid = false;
}

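/*
 * Switch a running pipeline from readback to online mode: enable the
 * toisp channels, drop the readback rx buffers and clear the line
 * wake-up state.
 */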
void sditf_change_to_online(struct sditf_priv *priv)
{
	struct rkcif_device *cif_dev = priv->cif_dev;

	priv->mode.rdbk_mode = RKISP_VICAP_ONLINE;
	if (priv->toisp_inf.link_mode == TOISP0) {
		sditf_channel_enable(priv, 0);
	} else if (priv->toisp_inf.link_mode == TOISP1) {
		sditf_channel_enable(priv, 1);
	} else if (priv->toisp_inf.link_mode == TOISP_UNITE) {
		sditf_channel_enable(priv, 0);
		sditf_channel_enable(priv, 1);
	}
	if (priv->hdr_cfg.hdr_mode == NO_HDR) {
		rkcif_free_rx_buf(&cif_dev->stream[0], priv->buf_num);
		cif_dev->stream[0].is_line_wake_up = false;
	} else if (priv->hdr_cfg.hdr_mode == HDR_X2) {
		rkcif_free_rx_buf(&cif_dev->stream[1], priv->buf_num);
		cif_dev->stream[0].is_line_wake_up = false;
		cif_dev->stream[1].is_line_wake_up = false;
	} else if (priv->hdr_cfg.hdr_mode == HDR_X3) {
		rkcif_free_rx_buf(&cif_dev->stream[2], priv->buf_num);
		cif_dev->stream[0].is_line_wake_up = false;
		cif_dev->stream[1].is_line_wake_up = false;
		cif_dev->stream[2].is_line_wake_up = false;
	}
	cif_dev->wait_line_cache = 0;
	cif_dev->wait_line = 0;
	cif_dev->wait_line_bak = 0;
}

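/*
 * Readback-to-online switching is only used in thunderboot mode when a
 * single CIF device on this hardware is linked to an ISP.
 */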
static void sditf_check_capture_mode(struct rkcif_device *cif_dev)
{
	struct rkcif_device *dev = NULL;
	int i = 0;
	int toisp_cnt = 0;

	for (i = 0; i < cif_dev->hw_dev->dev_num; i++) {
		dev = cif_dev->hw_dev->cif_dev[i];
		if (dev && dev->sditf_cnt)
			toisp_cnt++;
	}
	if (cif_dev->is_thunderboot && toisp_cnt == 1)
		cif_dev->is_rdbk_to_online = true;
	else
		cif_dev->is_rdbk_to_online = false;
}

static int sditf_start_stream(struct sditf_priv *priv)
{
	struct rkcif_device *cif_dev = priv->cif_dev;
	struct v4l2_subdev_format fmt;
	unsigned int mode = RKCIF_STREAM_MODE_TOISP;

	sditf_check_capture_mode(cif_dev);
	sditf_get_set_fmt(&priv->sd, NULL, &fmt);
	if (priv->mode.rdbk_mode == RKISP_VICAP_ONLINE) {
		if (priv->toisp_inf.link_mode == TOISP0) {
			sditf_channel_enable(priv, 0);
		} else if (priv->toisp_inf.link_mode == TOISP1) {
			sditf_channel_enable(priv, 1);
		} else if (priv->toisp_inf.link_mode == TOISP_UNITE) {
			sditf_channel_enable(priv, 0);
			sditf_channel_enable(priv, 1);
		}
		mode = RKCIF_STREAM_MODE_TOISP;
	} else if (priv->mode.rdbk_mode == RKISP_VICAP_RDBK_AUTO) {
		mode = RKCIF_STREAM_MODE_TOISP_RDBK;
	}

	if (priv->hdr_cfg.hdr_mode == NO_HDR ||
	    priv->hdr_cfg.hdr_mode == HDR_COMPR) {
		rkcif_do_start_stream(&cif_dev->stream[0], mode);
	} else if (priv->hdr_cfg.hdr_mode == HDR_X2) {
		rkcif_do_start_stream(&cif_dev->stream[0], mode);
		rkcif_do_start_stream(&cif_dev->stream[1], mode);
	} else if (priv->hdr_cfg.hdr_mode == HDR_X3) {
		rkcif_do_start_stream(&cif_dev->stream[0], mode);
		rkcif_do_start_stream(&cif_dev->stream[1], mode);
		rkcif_do_start_stream(&cif_dev->stream[2], mode);
	}
	INIT_LIST_HEAD(&priv->buf_free_list);
	return 0;
}

static int sditf_stop_stream(struct sditf_priv *priv)
{
	struct rkcif_device *cif_dev = priv->cif_dev;
	unsigned int mode = RKCIF_STREAM_MODE_TOISP;

	if (priv->toisp_inf.link_mode == TOISP0) {
		sditf_channel_disable(priv, 0);
	} else if (priv->toisp_inf.link_mode == TOISP1) {
		sditf_channel_disable(priv, 1);
	} else if (priv->toisp_inf.link_mode == TOISP_UNITE) {
		sditf_channel_disable(priv, 0);
		sditf_channel_disable(priv, 1);
	}

	if (priv->mode.rdbk_mode == RKISP_VICAP_ONLINE)
		mode = RKCIF_STREAM_MODE_TOISP;
	else if (priv->mode.rdbk_mode == RKISP_VICAP_RDBK_AUTO)
		mode = RKCIF_STREAM_MODE_TOISP_RDBK;

	if (priv->hdr_cfg.hdr_mode == NO_HDR ||
	    priv->hdr_cfg.hdr_mode == HDR_COMPR) {
		rkcif_do_stop_stream(&cif_dev->stream[0], mode);
	} else if (priv->hdr_cfg.hdr_mode == HDR_X2) {
		rkcif_do_stop_stream(&cif_dev->stream[0], mode);
		rkcif_do_stop_stream(&cif_dev->stream[1], mode);
	} else if (priv->hdr_cfg.hdr_mode == HDR_X3) {
		rkcif_do_stop_stream(&cif_dev->stream[0], mode);
		rkcif_do_stop_stream(&cif_dev->stream[1], mode);
		rkcif_do_stop_stream(&cif_dev->stream[2], mode);
	}
	return 0;
}

static int sditf_s_stream(struct v4l2_subdev *sd, int on)
{
	struct sditf_priv *priv = to_sditf_priv(sd);
	struct rkcif_device *cif_dev = priv->cif_dev;
	int ret = 0;

	if (!on && atomic_dec_return(&priv->stream_cnt))
		return 0;

	if (on && atomic_inc_return(&priv->stream_cnt) > 1)
		return 0;

	if (cif_dev->chip_id >= CHIP_RK3588_CIF) {
		if (priv->mode.rdbk_mode == RKISP_VICAP_RDBK_AIQ)
			return 0;
		v4l2_dbg(1, rkcif_debug, &cif_dev->v4l2_dev,
			 "%s, toisp mode %d, hdr %d, stream on %d\n",
			 __func__, priv->toisp_inf.link_mode, priv->hdr_cfg.hdr_mode, on);
		if (on) {
			ret = sditf_start_stream(priv);
		} else {
			ret = sditf_stop_stream(priv);
			sditf_free_buf(priv);
		}
	}
	return ret;
}

static int sditf_s_power(struct v4l2_subdev *sd, int on)
{
	struct sditf_priv *priv = to_sditf_priv(sd);
	struct rkcif_device *cif_dev = priv->cif_dev;
	struct rkcif_vdev_node *node = &cif_dev->stream[0].vnode;
	int ret = 0;

	if (!on && atomic_dec_return(&priv->power_cnt))
		return 0;

	if (on && atomic_inc_return(&priv->power_cnt) > 1)
		return 0;

	if (cif_dev->chip_id >= CHIP_RK3588_CIF) {
		v4l2_dbg(1, rkcif_debug, &cif_dev->v4l2_dev,
			 "%s, toisp mode %d, hdr %d, set power %d\n",
			 __func__, priv->toisp_inf.link_mode, priv->hdr_cfg.hdr_mode, on);
		mutex_lock(&cif_dev->stream_lock);
		if (on) {
			ret = pm_runtime_resume_and_get(cif_dev->dev);
			ret |= v4l2_pipeline_pm_get(&node->vdev.entity);
		} else {
			v4l2_pipeline_pm_put(&node->vdev.entity);
			pm_runtime_put_sync(cif_dev->dev);
		}
		v4l2_info(&node->vdev, "s_power %d, entity use_count %d\n",
			  on, node->vdev.entity.use_count);
		mutex_unlock(&cif_dev->stream_lock);
	}
	return ret;
}

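/*
 * Called by the ISP driver to hand a raw buffer back to VICAP. The
 * stream is chosen from the HDR buffer type; the buffer is either
 * returned to reserved memory (thunderboot) or requeued, a pending
 * switch to online mode is handled, and the early wake-up line is
 * updated from the reported ISP runtime.
 */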
static int sditf_s_rx_buffer(struct v4l2_subdev *sd,
			     void *buf, unsigned int *size)
{
	struct sditf_priv *priv = to_sditf_priv(sd);
	struct rkcif_device *cif_dev = priv->cif_dev;
	struct rkcif_sensor_info *sensor = &cif_dev->terminal_sensor;
	struct rkcif_stream *stream = NULL;
	struct rkisp_rx_buf *dbufs;
	struct rkcif_rx_buffer *rx_buf = NULL;
	unsigned long flags, buffree_flags;
	u32 diff_time = 1000000;
	u32 early_time = 0;
	bool is_free = false;

	if (!buf) {
		v4l2_err(&cif_dev->v4l2_dev, "buf is NULL\n");
		return -EINVAL;
	}

	dbufs = buf;
	if (cif_dev->hdr.hdr_mode == NO_HDR) {
		if (dbufs->type == BUF_SHORT)
			stream = &cif_dev->stream[0];
		else
			return -EINVAL;
	} else if (cif_dev->hdr.hdr_mode == HDR_X2) {
		if (dbufs->type == BUF_SHORT)
			stream = &cif_dev->stream[1];
		else if (dbufs->type == BUF_MIDDLE)
			stream = &cif_dev->stream[0];
		else
			return -EINVAL;
	} else if (cif_dev->hdr.hdr_mode == HDR_X3) {
		if (dbufs->type == BUF_SHORT)
			stream = &cif_dev->stream[2];
		else if (dbufs->type == BUF_MIDDLE)
			stream = &cif_dev->stream[1];
		else if (dbufs->type == BUF_LONG)
			stream = &cif_dev->stream[0];
		else
			return -EINVAL;
	}

	if (!stream)
		return -EINVAL;

	rx_buf = to_cif_rx_buf(dbufs);

	spin_lock_irqsave(&stream->vbq_lock, flags);
	stream->last_rx_buf_idx = dbufs->sequence + 1;
	atomic_inc(&stream->buf_cnt);

	if (!list_empty(&stream->rx_buf_head) &&
	    cif_dev->is_thunderboot &&
	    (dbufs->type == BUF_SHORT ||
	     (dbufs->type != BUF_SHORT && (!dbufs->is_switch)))) {
		spin_lock_irqsave(&cif_dev->buffree_lock, buffree_flags);
		list_add_tail(&rx_buf->list_free, &priv->buf_free_list);
		spin_unlock_irqrestore(&cif_dev->buffree_lock, buffree_flags);
		atomic_dec(&stream->buf_cnt);
		stream->total_buf_num--;
		schedule_work(&priv->buffree_work.work);
		is_free = true;
	}

	if (!is_free && (!dbufs->is_switch)) {
		list_add_tail(&rx_buf->list, &stream->rx_buf_head);
		rkcif_assign_check_buffer_update_toisp(stream);
		if (cif_dev->rdbk_debug) {
			u32 offset = 0;

			offset = rx_buf->dummy.size - stream->pixm.plane_fmt[0].bytesperline * 3;
			memset(rx_buf->dummy.vaddr + offset,
			       0x00, stream->pixm.plane_fmt[0].bytesperline * 3);
			if (cif_dev->is_thunderboot)
				dma_sync_single_for_device(cif_dev->dev,
							   rx_buf->dummy.dma_addr + rx_buf->dummy.size -
							   stream->pixm.plane_fmt[0].bytesperline * 3,
							   stream->pixm.plane_fmt[0].bytesperline * 3,
							   DMA_FROM_DEVICE);
			else
				cif_dev->hw_dev->mem_ops->prepare(rx_buf->dummy.mem_priv);
		}
	}

	if (dbufs->is_switch && dbufs->type == BUF_SHORT) {
		if (stream->is_in_vblank)
			sditf_change_to_online(priv);
		else
			stream->is_change_toisp = true;
		v4l2_dbg(3, rkcif_debug, &cif_dev->v4l2_dev,
			 "switch to online mode\n");
	}
	spin_unlock_irqrestore(&stream->vbq_lock, flags);

	if (dbufs->runtime_us && cif_dev->early_line == 0) {
		if (!cif_dev->sensor_linetime)
			cif_dev->sensor_linetime = rkcif_get_linetime(stream);
		cif_dev->isp_runtime_max = dbufs->runtime_us;
		if (cif_dev->is_thunderboot)
			diff_time = 200000;
		else
			diff_time = 1000000;
		if (dbufs->runtime_us * 1000 + cif_dev->sensor_linetime > diff_time)
			early_time = dbufs->runtime_us * 1000 - diff_time;
		else
			early_time = diff_time;
		cif_dev->early_line = div_u64(early_time, cif_dev->sensor_linetime);
		cif_dev->wait_line_cache = sensor->raw_rect.height - cif_dev->early_line;
		if (cif_dev->rdbk_debug &&
		    dbufs->sequence < 15)
			v4l2_info(&cif_dev->v4l2_dev,
				  "%s, isp runtime %d, line time %d, early_line %d, line_intr_cnt %d, seq %d, type %d, dma_addr %x\n",
				  __func__, dbufs->runtime_us, cif_dev->sensor_linetime,
				  cif_dev->early_line, cif_dev->wait_line_cache,
				  dbufs->sequence, dbufs->type, (u32)rx_buf->dummy.dma_addr);
	} else {
		if (dbufs->runtime_us < cif_dev->isp_runtime_max) {
			cif_dev->isp_runtime_max = dbufs->runtime_us;
			if (cif_dev->is_thunderboot)
				diff_time = 200000;
			else
				diff_time = 1000000;
			if (dbufs->runtime_us * 1000 + cif_dev->sensor_linetime > diff_time)
				early_time = dbufs->runtime_us * 1000 - diff_time;
			else
				early_time = diff_time;
			cif_dev->early_line = div_u64(early_time, cif_dev->sensor_linetime);
			cif_dev->wait_line_cache = sensor->raw_rect.height - cif_dev->early_line;
		}
		if (cif_dev->rdbk_debug &&
		    dbufs->sequence < 15)
			v4l2_info(&cif_dev->v4l2_dev,
				  "isp runtime %d, seq %d, type %d, early_line %d, dma addr %x\n",
				  dbufs->runtime_us, dbufs->sequence, dbufs->type,
				  cif_dev->early_line, (u32)rx_buf->dummy.dma_addr);
	}
	return 0;
}

static const struct v4l2_subdev_pad_ops sditf_subdev_pad_ops = {
	.set_fmt = sditf_get_set_fmt,
	.get_fmt = sditf_get_set_fmt,
	.get_selection = sditf_get_selection,
	.get_mbus_config = sditf_g_mbus_config,
};

static const struct v4l2_subdev_video_ops sditf_video_ops = {
	.g_frame_interval = sditf_g_frame_interval,
	.s_stream = sditf_s_stream,
	.s_rx_buffer = sditf_s_rx_buffer,
};

static const struct v4l2_subdev_core_ops sditf_core_ops = {
	.ioctl = sditf_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = sditf_compat_ioctl32,
#endif
	.s_power = sditf_s_power,
};

static const struct v4l2_subdev_ops sditf_subdev_ops = {
	.core = &sditf_core_ops,
	.video = &sditf_video_ops,
	.pad = &sditf_subdev_pad_ops,
};

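/*
 * Resolve the "rockchip,cif" phandle to the parent CIF device and
 * register this sditf instance with it.
 */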
static int rkcif_sditf_attach_cifdev(struct sditf_priv *sditf)
{
	struct device_node *np;
	struct platform_device *pdev;
	struct rkcif_device *cif_dev;

	np = of_parse_phandle(sditf->dev->of_node, "rockchip,cif", 0);
	if (!np || !of_device_is_available(np)) {
		dev_err(sditf->dev, "failed to get cif dev node\n");
		return -ENODEV;
	}

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev) {
		dev_err(sditf->dev, "failed to get cif dev from node\n");
		return -ENODEV;
	}

	cif_dev = platform_get_drvdata(pdev);
	if (!cif_dev) {
		dev_err(sditf->dev, "failed to attach cif dev\n");
		return -EINVAL;
	}

	cif_dev->sditf[cif_dev->sditf_cnt] = sditf;
	sditf->cif_dev = cif_dev;
	cif_dev->sditf_cnt++;

	return 0;
}

struct sensor_async_subdev {
	struct v4l2_async_subdev asd;
	struct v4l2_mbus_config mbus;
	int lanes;
};

static int sditf_fwnode_parse(struct device *dev,
			      struct v4l2_fwnode_endpoint *vep,
			      struct v4l2_async_subdev *asd)
{
	struct sensor_async_subdev *s_asd =
		container_of(asd, struct sensor_async_subdev, asd);
	struct v4l2_mbus_config *config = &s_asd->mbus;

	if (vep->base.port != 0) {
		dev_err(dev, "sditf has only port 0\n");
		return -EINVAL;
	}

	if (vep->bus_type == V4L2_MBUS_CSI2_DPHY ||
	    vep->bus_type == V4L2_MBUS_CSI2_CPHY) {
		config->type = vep->bus_type;
		config->flags = vep->bus.mipi_csi2.flags;
		s_asd->lanes = vep->bus.mipi_csi2.num_data_lanes;
	} else if (vep->bus_type == V4L2_MBUS_CCP2) {
		config->type = vep->bus_type;
		s_asd->lanes = vep->bus.mipi_csi1.data_lane;
	} else {
		dev_err(dev, "type is not supported\n");
		return -EINVAL;
	}

	switch (s_asd->lanes) {
	case 1:
		config->flags |= V4L2_MBUS_CSI2_1_LANE;
		break;
	case 2:
		config->flags |= V4L2_MBUS_CSI2_2_LANE;
		break;
	case 3:
		config->flags |= V4L2_MBUS_CSI2_3_LANE;
		break;
	case 4:
		config->flags |= V4L2_MBUS_CSI2_4_LANE;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int rkcif_sditf_get_ctrl(struct v4l2_ctrl *ctrl)
{
	struct sditf_priv *priv = container_of(ctrl->handler,
					       struct sditf_priv,
					       ctrl_handler);
	struct v4l2_ctrl *sensor_ctrl = NULL;

	switch (ctrl->id) {
	case V4L2_CID_PIXEL_RATE:
		if (priv->cif_dev->terminal_sensor.sd) {
			sensor_ctrl = v4l2_ctrl_find(priv->cif_dev->terminal_sensor.sd->ctrl_handler,
						     V4L2_CID_PIXEL_RATE);
			if (sensor_ctrl) {
				ctrl->val = v4l2_ctrl_g_ctrl_int64(sensor_ctrl);
				__v4l2_ctrl_s_ctrl_int64(priv->pixel_rate, ctrl->val);
				v4l2_dbg(1, rkcif_debug, &priv->cif_dev->v4l2_dev,
					 "%s, %s pixel rate %d\n",
					 __func__, priv->cif_dev->terminal_sensor.sd->name, ctrl->val);
				return 0;
			} else {
				return -EINVAL;
			}
		}
		return -EINVAL;
	default:
		return -EINVAL;
	}
}

static const struct v4l2_ctrl_ops rkcif_sditf_ctrl_ops = {
	.g_volatile_ctrl = rkcif_sditf_get_ctrl,
};

static int sditf_notifier_bound(struct v4l2_async_notifier *notifier,
				struct v4l2_subdev *subdev,
				struct v4l2_async_subdev *asd)
{
	struct sditf_priv *sditf = container_of(notifier,
						struct sditf_priv, notifier);
	struct media_entity *source_entity, *sink_entity;
	int ret = 0;

	sditf->sensor_sd = subdev;

	if (sditf->num_sensors == 1) {
		v4l2_err(subdev,
			 "%s: the num of subdev is beyond %d\n",
			 __func__, sditf->num_sensors);
		return -EBUSY;
	}

	if (sditf->sd.entity.pads[0].flags & MEDIA_PAD_FL_SINK) {
		source_entity = &subdev->entity;
		sink_entity = &sditf->sd.entity;

		ret = media_create_pad_link(source_entity,
					    0,
					    sink_entity,
					    0,
					    MEDIA_LNK_FL_ENABLED);
		if (ret)
			v4l2_err(&sditf->sd, "failed to create link for %s\n",
				 sditf->sensor_sd->name);
	}
	sditf->sensor_sd = subdev;
	++sditf->num_sensors;

	v4l2_err(subdev, "Async registered subdev\n");

	return 0;
}

static void sditf_notifier_unbind(struct v4l2_async_notifier *notifier,
				  struct v4l2_subdev *sd,
				  struct v4l2_async_subdev *asd)
{
	struct sditf_priv *sditf = container_of(notifier,
						struct sditf_priv,
						notifier);

	sditf->sensor_sd = NULL;
}

static const struct v4l2_async_notifier_operations sditf_notifier_ops = {
	.bound = sditf_notifier_bound,
	.unbind = sditf_notifier_unbind,
};

static int sditf_subdev_notifier(struct sditf_priv *sditf)
{
	struct v4l2_async_notifier *ntf = &sditf->notifier;
	int ret;

	v4l2_async_notifier_init(ntf);

	ret = v4l2_async_notifier_parse_fwnode_endpoints_by_port(
		sditf->dev, &sditf->notifier,
		sizeof(struct sensor_async_subdev), 0,
		sditf_fwnode_parse);
	if (ret < 0)
		return ret;

	sditf->sd.subdev_notifier = &sditf->notifier;
	sditf->notifier.ops = &sditf_notifier_ops;

	ret = v4l2_async_subdev_notifier_register(&sditf->sd, &sditf->notifier);
	if (ret) {
		v4l2_err(&sditf->sd,
			 "failed to register async notifier : %d\n",
			 ret);
		v4l2_async_notifier_cleanup(&sditf->notifier);
		return ret;
	}

	return v4l2_async_register_subdev(&sditf->sd);
}

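/*
 * Set up the media pads (sink + source in combine mode), the volatile
 * PIXEL_RATE control, the default mode state and the buffer-free worker.
 */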
static int rkcif_subdev_media_init(struct sditf_priv *priv)
{
	struct rkcif_device *cif_dev = priv->cif_dev;
	struct v4l2_ctrl_handler *handler = &priv->ctrl_handler;
	unsigned long flags = V4L2_CTRL_FLAG_VOLATILE;
	int ret;
	int pad_num = 0;

	if (priv->is_combine_mode) {
		priv->pads[0].flags = MEDIA_PAD_FL_SINK;
		priv->pads[1].flags = MEDIA_PAD_FL_SOURCE;
		pad_num = 2;
	} else {
		priv->pads[0].flags = MEDIA_PAD_FL_SOURCE;
		pad_num = 1;
	}
	priv->sd.entity.function = MEDIA_ENT_F_PROC_VIDEO_COMPOSER;
	ret = media_entity_pads_init(&priv->sd.entity, pad_num, priv->pads);
	if (ret < 0)
		return ret;

	ret = v4l2_ctrl_handler_init(handler, 1);
	if (ret)
		return ret;
	priv->pixel_rate = v4l2_ctrl_new_std(handler, &rkcif_sditf_ctrl_ops,
					     V4L2_CID_PIXEL_RATE,
					     0, SDITF_PIXEL_RATE_MAX,
					     1, SDITF_PIXEL_RATE_MAX);
	if (priv->pixel_rate)
		priv->pixel_rate->flags |= flags;
	priv->sd.ctrl_handler = handler;
	if (handler->error) {
		v4l2_ctrl_handler_free(handler);
		return handler->error;
	}

	strncpy(priv->sd.name, dev_name(cif_dev->dev), sizeof(priv->sd.name));
	priv->cap_info.width = 0;
	priv->cap_info.height = 0;
	priv->mode.rdbk_mode = RKISP_VICAP_RDBK_AIQ;
	priv->toisp_inf.link_mode = TOISP_NONE;
	priv->toisp_inf.ch_info[0].is_valid = false;
	priv->toisp_inf.ch_info[1].is_valid = false;
	priv->toisp_inf.ch_info[2].is_valid = false;
	if (priv->is_combine_mode)
		sditf_subdev_notifier(priv);
	atomic_set(&priv->power_cnt, 0);
	atomic_set(&priv->stream_cnt, 0);
	INIT_WORK(&priv->buffree_work.work, sditf_buffree_work);
	INIT_LIST_HEAD(&priv->buf_free_list);
	return 0;
}

static int rkcif_subdev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct v4l2_subdev *sd;
	struct sditf_priv *priv;
	struct device_node *node = dev->of_node;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	priv->dev = dev;

	sd = &priv->sd;
	v4l2_subdev_init(sd, &sditf_subdev_ops);
	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	snprintf(sd->name, sizeof(sd->name), "rockchip-cif-sditf");
	sd->dev = dev;

	platform_set_drvdata(pdev, &sd->entity);

	ret = rkcif_sditf_attach_cifdev(priv);
	if (ret < 0)
		return ret;

	ret = of_property_read_u32(node,
				   "rockchip,combine-index",
				   &priv->combine_index);
	if (ret) {
		priv->is_combine_mode = false;
		priv->combine_index = 0;
	} else {
		priv->is_combine_mode = true;
	}
	ret = rkcif_subdev_media_init(priv);
	if (ret < 0)
		return ret;

	pm_runtime_enable(&pdev->dev);
	return 0;
}

static int rkcif_subdev_remove(struct platform_device *pdev)
{
	struct media_entity *me = platform_get_drvdata(pdev);
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(me);

	media_entity_cleanup(&sd->entity);

	pm_runtime_disable(&pdev->dev);
	return 0;
}

static const struct of_device_id rkcif_subdev_match_id[] = {
	{
		.compatible = "rockchip,rkcif-sditf",
	},
	{}
};
MODULE_DEVICE_TABLE(of, rkcif_subdev_match_id);

struct platform_driver rkcif_subdev_driver = {
	.probe = rkcif_subdev_probe,
	.remove = rkcif_subdev_remove,
	.driver = {
		.name = "rkcif_sditf",
		.of_match_table = rkcif_subdev_match_id,
	},
};
EXPORT_SYMBOL(rkcif_subdev_driver);

MODULE_AUTHOR("Rockchip Camera/ISP team");
MODULE_DESCRIPTION("Rockchip CIF platform driver");
MODULE_LICENSE("GPL v2");