1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Rockchip CIF Driver
4 *
5 * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
6 */
7 #include <linux/clk.h>
8 #include <linux/delay.h>
9 #include <linux/interrupt.h>
10 #include <linux/module.h>
11 #include <linux/of.h>
12 #include <linux/of_gpio.h>
13 #include <linux/of_graph.h>
14 #include <linux/of_platform.h>
15 #include <linux/of_reserved_mem.h>
16 #include <linux/reset.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/pinctrl/consumer.h>
19 #include <linux/regmap.h>
20 #include <media/videobuf2-dma-contig.h>
21 #include <media/v4l2-fwnode.h>
22 #include <linux/iommu.h>
23 #include <dt-bindings/soc/rockchip-system-status.h>
24 #include <soc/rockchip/rockchip-system-status.h>
25 #include <linux/io.h>
26 #include <linux/mfd/syscon.h>
27 #include "dev.h"
28 #include "procfs.h"
29 #include <linux/kthread.h>
30 #include "../../../../phy/rockchip/phy-rockchip-csi2-dphy-common.h"
31 #include <linux/of_reserved_mem.h>
32 #include <linux/of_address.h>
33
#define RKCIF_VERNO_LEN 10

/* debug: controls v4l2_dbg verbosity in this driver (module param, rw) */
int rkcif_debug;
module_param_named(debug, rkcif_debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-1)");

/* driver version string, exported read-only via /sys/module parameters */
static char rkcif_version[RKCIF_VERNO_LEN];
module_param_string(version, rkcif_version, RKCIF_VERNO_LEN, 0444);
MODULE_PARM_DESC(version, "version number");

/* rkcif_dev_mutex protects rkcif_device_list, the list of probed devices */
static DEFINE_MUTEX(rkcif_dev_mutex);
static LIST_HEAD(rkcif_device_list);
46
47 /* show the compact mode of each stream in stream index order,
48 * 1 for compact, 0 for 16bit
49 */
rkcif_show_compact_mode(struct device * dev,struct device_attribute * attr,char * buf)50 static ssize_t rkcif_show_compact_mode(struct device *dev,
51 struct device_attribute *attr,
52 char *buf)
53 {
54 struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
55 int ret;
56
57 ret = snprintf(buf, PAGE_SIZE, "%d %d %d %d\n",
58 cif_dev->stream[0].is_compact ? 1 : 0,
59 cif_dev->stream[1].is_compact ? 1 : 0,
60 cif_dev->stream[2].is_compact ? 1 : 0,
61 cif_dev->stream[3].is_compact ? 1 : 0);
62 return ret;
63 }
64
rkcif_store_compact_mode(struct device * dev,struct device_attribute * attr,const char * buf,size_t len)65 static ssize_t rkcif_store_compact_mode(struct device *dev,
66 struct device_attribute *attr,
67 const char *buf, size_t len)
68 {
69 struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
70 int i, index;
71 char val[4];
72
73 if (buf) {
74 index = 0;
75 for (i = 0; i < len; i++) {
76 if (buf[i] == ' ') {
77 continue;
78 } else if (buf[i] == '\0') {
79 break;
80 } else {
81 val[index] = buf[i];
82 index++;
83 if (index == 4)
84 break;
85 }
86 }
87
88 for (i = 0; i < index; i++) {
89 if (val[i] - '0' == 0)
90 cif_dev->stream[i].is_compact = false;
91 else
92 cif_dev->stream[i].is_compact = true;
93 }
94 }
95
96 return len;
97 }
98
99 static DEVICE_ATTR(compact_test, S_IWUSR | S_IRUSR,
100 rkcif_show_compact_mode, rkcif_store_compact_mode);
101
rkcif_show_line_int_num(struct device * dev,struct device_attribute * attr,char * buf)102 static ssize_t rkcif_show_line_int_num(struct device *dev,
103 struct device_attribute *attr,
104 char *buf)
105 {
106 struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
107 int ret;
108
109 ret = snprintf(buf, PAGE_SIZE, "%d\n",
110 cif_dev->wait_line_cache);
111 return ret;
112 }
113
rkcif_store_line_int_num(struct device * dev,struct device_attribute * attr,const char * buf,size_t len)114 static ssize_t rkcif_store_line_int_num(struct device *dev,
115 struct device_attribute *attr,
116 const char *buf, size_t len)
117 {
118 struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
119 struct sditf_priv *priv = cif_dev->sditf[0];
120 int val = 0;
121 int ret = 0;
122
123 if (priv && priv->mode.rdbk_mode == RKISP_VICAP_ONLINE) {
124 dev_info(cif_dev->dev,
125 "current mode is on the fly, wake up mode wouldn't used\n");
126 return len;
127 }
128 ret = kstrtoint(buf, 0, &val);
129 if (!ret && val >= 0 && val <= 0x3fff)
130 cif_dev->wait_line_cache = val;
131 else
132 dev_info(cif_dev->dev, "set line int num failed\n");
133 return len;
134 }
135
136 static DEVICE_ATTR(wait_line, S_IWUSR | S_IRUSR,
137 rkcif_show_line_int_num, rkcif_store_line_int_num);
138
rkcif_show_dummybuf_mode(struct device * dev,struct device_attribute * attr,char * buf)139 static ssize_t rkcif_show_dummybuf_mode(struct device *dev,
140 struct device_attribute *attr,
141 char *buf)
142 {
143 struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
144 int ret;
145
146 ret = snprintf(buf, PAGE_SIZE, "%d\n",
147 cif_dev->is_use_dummybuf);
148 return ret;
149 }
150
rkcif_store_dummybuf_mode(struct device * dev,struct device_attribute * attr,const char * buf,size_t len)151 static ssize_t rkcif_store_dummybuf_mode(struct device *dev,
152 struct device_attribute *attr,
153 const char *buf, size_t len)
154 {
155 struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
156 int val = 0;
157 int ret = 0;
158
159 ret = kstrtoint(buf, 0, &val);
160 if (!ret) {
161 if (val)
162 cif_dev->is_use_dummybuf = true;
163 else
164 cif_dev->is_use_dummybuf = false;
165 } else {
166 dev_info(cif_dev->dev, "set dummy buf mode failed\n");
167 }
168 return len;
169 }
170
171 static DEVICE_ATTR(is_use_dummybuf, S_IWUSR | S_IRUSR,
172 rkcif_show_dummybuf_mode, rkcif_store_dummybuf_mode);
173
174 /* show the memory mode of each stream in stream index order,
175 * 1 for high align, 0 for low align
176 */
rkcif_show_memory_mode(struct device * dev,struct device_attribute * attr,char * buf)177 static ssize_t rkcif_show_memory_mode(struct device *dev,
178 struct device_attribute *attr,
179 char *buf)
180 {
181 struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
182 int ret;
183
184 ret = snprintf(buf, PAGE_SIZE,
185 "stream[0~3] %d %d %d %d, 0(low align) 1(high align) 2(compact)\n",
186 cif_dev->stream[0].is_compact ? 2 : (cif_dev->stream[0].is_high_align ? 1 : 0),
187 cif_dev->stream[1].is_compact ? 2 : (cif_dev->stream[1].is_high_align ? 1 : 0),
188 cif_dev->stream[2].is_compact ? 2 : (cif_dev->stream[2].is_high_align ? 1 : 0),
189 cif_dev->stream[3].is_compact ? 2 : (cif_dev->stream[3].is_high_align ? 1 : 0));
190 return ret;
191 }
192
rkcif_store_memory_mode(struct device * dev,struct device_attribute * attr,const char * buf,size_t len)193 static ssize_t rkcif_store_memory_mode(struct device *dev,
194 struct device_attribute *attr,
195 const char *buf, size_t len)
196 {
197 struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
198 int i, index;
199 char val[4];
200
201 if (buf) {
202 index = 0;
203 for (i = 0; i < len; i++) {
204 if (buf[i] == ' ') {
205 continue;
206 } else if (buf[i] == '\0') {
207 break;
208 } else {
209 val[index] = buf[i];
210 index++;
211 if (index == 4)
212 break;
213 }
214 }
215
216 for (i = 0; i < index; i++) {
217 if (cif_dev->stream[i].is_compact) {
218 dev_info(cif_dev->dev, "stream[%d] set memory align fail, is compact mode\n",
219 i);
220 continue;
221 }
222 if (val[i] - '0' == 0)
223 cif_dev->stream[i].is_high_align = false;
224 else
225 cif_dev->stream[i].is_high_align = true;
226 }
227 }
228
229 return len;
230 }
231
232 static DEVICE_ATTR(is_high_align, S_IWUSR | S_IRUSR,
233 rkcif_show_memory_mode, rkcif_store_memory_mode);
234
rkcif_show_scale_ch0_blc(struct device * dev,struct device_attribute * attr,char * buf)235 static ssize_t rkcif_show_scale_ch0_blc(struct device *dev,
236 struct device_attribute *attr,
237 char *buf)
238 {
239 struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
240 int ret;
241
242 ret = snprintf(buf, PAGE_SIZE, "ch0 pattern00: %d, pattern01: %d, pattern02: %d, pattern03: %d\n",
243 cif_dev->scale_vdev[0].blc.pattern00,
244 cif_dev->scale_vdev[0].blc.pattern01,
245 cif_dev->scale_vdev[0].blc.pattern02,
246 cif_dev->scale_vdev[0].blc.pattern03);
247 return ret;
248 }
249
rkcif_store_scale_ch0_blc(struct device * dev,struct device_attribute * attr,const char * buf,size_t len)250 static ssize_t rkcif_store_scale_ch0_blc(struct device *dev,
251 struct device_attribute *attr,
252 const char *buf, size_t len)
253 {
254 struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
255 int i = 0, index = 0;
256 unsigned int val[4] = {0};
257 unsigned int temp = 0;
258 int ret = 0;
259 int j = 0;
260 char cha[2] = {0};
261
262 if (buf) {
263 index = 0;
264 for (i = 0; i < len; i++) {
265 if (((buf[i] == ' ') || (buf[i] == '\n')) && j) {
266 index++;
267 j = 0;
268 if (index == 4)
269 break;
270 continue;
271 } else {
272 if (buf[i] < '0' || buf[i] > '9')
273 continue;
274 cha[0] = buf[i];
275 cha[1] = '\0';
276 ret = kstrtoint(cha, 0, &temp);
277 if (!ret) {
278 if (j)
279 val[index] *= 10;
280 val[index] += temp;
281 j++;
282 }
283 }
284 }
285 if (val[0] > 255 || val[1] > 255 || val[2] > 255 || val[3] > 255)
286 return -EINVAL;
287 cif_dev->scale_vdev[0].blc.pattern00 = val[0];
288 cif_dev->scale_vdev[0].blc.pattern01 = val[1];
289 cif_dev->scale_vdev[0].blc.pattern02 = val[2];
290 cif_dev->scale_vdev[0].blc.pattern03 = val[3];
291 dev_info(cif_dev->dev,
292 "set ch0 pattern00: %d, pattern01: %d, pattern02: %d, pattern03: %d\n",
293 cif_dev->scale_vdev[0].blc.pattern00,
294 cif_dev->scale_vdev[0].blc.pattern01,
295 cif_dev->scale_vdev[0].blc.pattern02,
296 cif_dev->scale_vdev[0].blc.pattern03);
297 }
298
299 return len;
300 }
301
302 static DEVICE_ATTR(scale_ch0_blc, S_IWUSR | S_IRUSR,
303 rkcif_show_scale_ch0_blc, rkcif_store_scale_ch0_blc);
304
rkcif_show_scale_ch1_blc(struct device * dev,struct device_attribute * attr,char * buf)305 static ssize_t rkcif_show_scale_ch1_blc(struct device *dev,
306 struct device_attribute *attr,
307 char *buf)
308 {
309 struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
310 int ret;
311
312 ret = snprintf(buf, PAGE_SIZE, "ch1 pattern00: %d, pattern01: %d, pattern02: %d, pattern03: %d\n",
313 cif_dev->scale_vdev[1].blc.pattern00,
314 cif_dev->scale_vdev[1].blc.pattern01,
315 cif_dev->scale_vdev[1].blc.pattern02,
316 cif_dev->scale_vdev[1].blc.pattern03);
317 return ret;
318 }
319
rkcif_store_scale_ch1_blc(struct device * dev,struct device_attribute * attr,const char * buf,size_t len)320 static ssize_t rkcif_store_scale_ch1_blc(struct device *dev,
321 struct device_attribute *attr,
322 const char *buf, size_t len)
323 {
324 struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
325 int i = 0, index = 0;
326 unsigned int val[4] = {0};
327 unsigned int temp = 0;
328 int ret = 0;
329 int j = 0;
330 char cha[2] = {0};
331
332 if (buf) {
333 index = 0;
334 for (i = 0; i < len; i++) {
335 if (((buf[i] == ' ') || (buf[i] == '\n')) && j) {
336 index++;
337 j = 0;
338 if (index == 4)
339 break;
340 continue;
341 } else {
342 if (buf[i] < '0' || buf[i] > '9')
343 continue;
344 cha[0] = buf[i];
345 cha[1] = '\0';
346 ret = kstrtoint(cha, 0, &temp);
347 if (!ret) {
348 if (j)
349 val[index] *= 10;
350 val[index] += temp;
351 j++;
352 }
353 }
354 }
355 if (val[0] > 255 || val[1] > 255 || val[2] > 255 || val[3] > 255)
356 return -EINVAL;
357
358 cif_dev->scale_vdev[1].blc.pattern00 = val[0];
359 cif_dev->scale_vdev[1].blc.pattern01 = val[1];
360 cif_dev->scale_vdev[1].blc.pattern02 = val[2];
361 cif_dev->scale_vdev[1].blc.pattern03 = val[3];
362
363 dev_info(cif_dev->dev,
364 "set ch1 pattern00: %d, pattern01: %d, pattern02: %d, pattern03: %d\n",
365 cif_dev->scale_vdev[1].blc.pattern00,
366 cif_dev->scale_vdev[1].blc.pattern01,
367 cif_dev->scale_vdev[1].blc.pattern02,
368 cif_dev->scale_vdev[1].blc.pattern03);
369 }
370
371 return len;
372 }
373
374 static DEVICE_ATTR(scale_ch1_blc, S_IWUSR | S_IRUSR,
375 rkcif_show_scale_ch1_blc, rkcif_store_scale_ch1_blc);
376
rkcif_show_scale_ch2_blc(struct device * dev,struct device_attribute * attr,char * buf)377 static ssize_t rkcif_show_scale_ch2_blc(struct device *dev,
378 struct device_attribute *attr,
379 char *buf)
380 {
381 struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
382 int ret;
383
384 ret = snprintf(buf, PAGE_SIZE, "ch2 pattern00: %d, pattern01: %d, pattern02: %d, pattern03: %d\n",
385 cif_dev->scale_vdev[2].blc.pattern00,
386 cif_dev->scale_vdev[2].blc.pattern01,
387 cif_dev->scale_vdev[2].blc.pattern02,
388 cif_dev->scale_vdev[2].blc.pattern03);
389 return ret;
390 }
391
rkcif_store_scale_ch2_blc(struct device * dev,struct device_attribute * attr,const char * buf,size_t len)392 static ssize_t rkcif_store_scale_ch2_blc(struct device *dev,
393 struct device_attribute *attr,
394 const char *buf, size_t len)
395 {
396 struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
397 int i = 0, index = 0;
398 unsigned int val[4] = {0};
399 unsigned int temp = 0;
400 int ret = 0;
401 int j = 0;
402 char cha[2] = {0};
403
404 if (buf) {
405 index = 0;
406 for (i = 0; i < len; i++) {
407 if (((buf[i] == ' ') || (buf[i] == '\n')) && j) {
408 index++;
409 j = 0;
410 if (index == 4)
411 break;
412 continue;
413 } else {
414 if (buf[i] < '0' || buf[i] > '9')
415 continue;
416 cha[0] = buf[i];
417 cha[1] = '\0';
418 ret = kstrtoint(cha, 0, &temp);
419 if (!ret) {
420 if (j)
421 val[index] *= 10;
422 val[index] += temp;
423 j++;
424 }
425 }
426 }
427 if (val[0] > 255 || val[1] > 255 || val[2] > 255 || val[3] > 255)
428 return -EINVAL;
429
430 cif_dev->scale_vdev[2].blc.pattern00 = val[0];
431 cif_dev->scale_vdev[2].blc.pattern01 = val[1];
432 cif_dev->scale_vdev[2].blc.pattern02 = val[2];
433 cif_dev->scale_vdev[2].blc.pattern03 = val[3];
434
435 dev_info(cif_dev->dev,
436 "set ch2 pattern00: %d, pattern01: %d, pattern02: %d, pattern03: %d\n",
437 cif_dev->scale_vdev[2].blc.pattern00,
438 cif_dev->scale_vdev[2].blc.pattern01,
439 cif_dev->scale_vdev[2].blc.pattern02,
440 cif_dev->scale_vdev[2].blc.pattern03);
441 }
442
443 return len;
444 }
445 static DEVICE_ATTR(scale_ch2_blc, S_IWUSR | S_IRUSR,
446 rkcif_show_scale_ch2_blc, rkcif_store_scale_ch2_blc);
447
rkcif_show_scale_ch3_blc(struct device * dev,struct device_attribute * attr,char * buf)448 static ssize_t rkcif_show_scale_ch3_blc(struct device *dev,
449 struct device_attribute *attr,
450 char *buf)
451 {
452 struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
453 int ret;
454
455 ret = snprintf(buf, PAGE_SIZE, "ch3 pattern00: %d, pattern01: %d, pattern02: %d, pattern03: %d\n",
456 cif_dev->scale_vdev[3].blc.pattern00,
457 cif_dev->scale_vdev[3].blc.pattern01,
458 cif_dev->scale_vdev[3].blc.pattern02,
459 cif_dev->scale_vdev[3].blc.pattern03);
460 return ret;
461 }
462
rkcif_store_scale_ch3_blc(struct device * dev,struct device_attribute * attr,const char * buf,size_t len)463 static ssize_t rkcif_store_scale_ch3_blc(struct device *dev,
464 struct device_attribute *attr,
465 const char *buf, size_t len)
466 {
467 struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
468 int i = 0, index = 0;
469 unsigned int val[4] = {0};
470 unsigned int temp = 0;
471 int ret = 0;
472 int j = 0;
473 char cha[2] = {0};
474
475 if (buf) {
476 index = 0;
477 for (i = 0; i < len; i++) {
478 if (((buf[i] == ' ') || (buf[i] == '\n')) && j) {
479 index++;
480 j = 0;
481 if (index == 4)
482 break;
483 continue;
484 } else {
485 if (buf[i] < '0' || buf[i] > '9')
486 continue;
487 cha[0] = buf[i];
488 cha[1] = '\0';
489 ret = kstrtoint(cha, 0, &temp);
490 if (!ret) {
491 if (j)
492 val[index] *= 10;
493 val[index] += temp;
494 j++;
495 }
496 }
497 }
498 if (val[0] > 255 || val[1] > 255 || val[2] > 255 || val[3] > 255)
499 return -EINVAL;
500
501 cif_dev->scale_vdev[3].blc.pattern00 = val[0];
502 cif_dev->scale_vdev[3].blc.pattern01 = val[1];
503 cif_dev->scale_vdev[3].blc.pattern02 = val[2];
504 cif_dev->scale_vdev[3].blc.pattern03 = val[3];
505
506 dev_info(cif_dev->dev,
507 "set ch3 pattern00: %d, pattern01: %d, pattern02: %d, pattern03: %d\n",
508 cif_dev->scale_vdev[3].blc.pattern00,
509 cif_dev->scale_vdev[3].blc.pattern01,
510 cif_dev->scale_vdev[3].blc.pattern02,
511 cif_dev->scale_vdev[3].blc.pattern03);
512 }
513
514 return len;
515 }
516
517 static DEVICE_ATTR(scale_ch3_blc, S_IWUSR | S_IRUSR,
518 rkcif_show_scale_ch3_blc, rkcif_store_scale_ch3_blc);
519
rkcif_store_capture_fps(struct device * dev,struct device_attribute * attr,const char * buf,size_t len)520 static ssize_t rkcif_store_capture_fps(struct device *dev,
521 struct device_attribute *attr,
522 const char *buf, size_t len)
523 {
524 struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
525 struct rkcif_stream *stream = NULL;
526 int i = 0, index = 0;
527 unsigned int val[4] = {0};
528 unsigned int temp = 0;
529 int ret = 0;
530 int j = 0;
531 char cha[2] = {0};
532 struct rkcif_fps fps = {0};
533
534 if (buf) {
535 index = 0;
536 for (i = 0; i < len; i++) {
537 if (((buf[i] == ' ') || (buf[i] == '\n')) && j) {
538 index++;
539 j = 0;
540 if (index == 4)
541 break;
542 continue;
543 } else {
544 if (buf[i] < '0' || buf[i] > '9')
545 continue;
546 cha[0] = buf[i];
547 cha[1] = '\0';
548 ret = kstrtoint(cha, 0, &temp);
549 if (!ret) {
550 if (j)
551 val[index] *= 10;
552 val[index] += temp;
553 j++;
554 }
555 }
556 }
557
558 for (i = 0; i < index; i++) {
559 if ((val[i] - '0' != 0) && cif_dev->chip_id >= CHIP_RV1106_CIF) {
560 stream = &cif_dev->stream[i];
561 fps.fps = val[i];
562 rkcif_set_fps(stream, &fps);
563 }
564 }
565 dev_info(cif_dev->dev,
566 "set fps id0: %d, id1: %d, id2: %d, id3: %d\n",
567 val[0], val[1], val[2], val[3]);
568 }
569
570 return len;
571 }
572 static DEVICE_ATTR(fps, 0200, NULL, rkcif_store_capture_fps);
573
rkcif_show_rdbk_debug(struct device * dev,struct device_attribute * attr,char * buf)574 static ssize_t rkcif_show_rdbk_debug(struct device *dev,
575 struct device_attribute *attr,
576 char *buf)
577 {
578 struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
579 int ret;
580
581 ret = snprintf(buf, PAGE_SIZE, "%d\n",
582 cif_dev->rdbk_debug);
583 return ret;
584 }
585
rkcif_store_rdbk_debug(struct device * dev,struct device_attribute * attr,const char * buf,size_t len)586 static ssize_t rkcif_store_rdbk_debug(struct device *dev,
587 struct device_attribute *attr,
588 const char *buf, size_t len)
589 {
590 struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
591 int val = 0;
592 int ret = 0;
593
594 ret = kstrtoint(buf, 0, &val);
595 if (!ret)
596 cif_dev->rdbk_debug = val;
597 else
598 dev_info(cif_dev->dev, "set rdbk debug failed\n");
599 return len;
600 }
601 static DEVICE_ATTR(rdbk_debug, 0200, rkcif_show_rdbk_debug, rkcif_store_rdbk_debug);
602
/* All sysfs attributes registered on the CIF platform device. */
static struct attribute *dev_attrs[] = {
	&dev_attr_compact_test.attr,
	&dev_attr_wait_line.attr,
	&dev_attr_is_use_dummybuf.attr,
	&dev_attr_is_high_align.attr,
	&dev_attr_scale_ch0_blc.attr,
	&dev_attr_scale_ch1_blc.attr,
	&dev_attr_scale_ch2_blc.attr,
	&dev_attr_scale_ch3_blc.attr,
	&dev_attr_fps.attr,
	&dev_attr_rdbk_debug.attr,
	NULL,
};

/* Group passed to sysfs_create_group() at probe time. */
static struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
620
/* Per-compatible match data: selects the interface type (DVP vs MIPI/LVDS). */
struct rkcif_match_data {
	int inf_id;
};
624
/*
 * Write @val to the register selected by @index, adding the per-CSI-host
 * bank offset for MIPI/LVDS registers on chips with repeated banks.
 */
void rkcif_write_register(struct rkcif_device *dev,
			  enum cif_reg_index index, u32 val)
{
	void __iomem *base = dev->hw_dev->base_addr;
	const struct cif_reg *reg = &dev->hw_dev->cif_regs[index];
	int csi_offset = 0;

	if (dev->inf_id == RKCIF_MIPI_LVDS &&
	    index >= CIF_REG_MIPI_LVDS_ID0_CTRL0 &&
	    index <= CIF_REG_MIPI_ON_PAD) {
		switch (dev->chip_id) {
		case CHIP_RK3588_CIF:
			csi_offset = dev->csi_host_idx * 0x100;
			break;
		case CHIP_RV1106_CIF:
			csi_offset = dev->csi_host_idx * 0x200;
			break;
		case CHIP_RK3562_CIF:
			/* hosts 0..2 are 0x200 apart, host 3 sits at 0x500 */
			csi_offset = dev->csi_host_idx < 3 ?
				     dev->csi_host_idx * 0x200 : 0x500;
			break;
		default:
			break;
		}
	}

	if (index >= CIF_REG_INDEX_MAX)
		return;

	/* offset 0 marks an unsupported register (except DVP_CTRL itself) */
	if (index != CIF_REG_DVP_CTRL && reg->offset == 0x0) {
		v4l2_dbg(1, rkcif_debug, &dev->v4l2_dev,
			 "write reg[%d]:0x%x failed, maybe useless!!!\n",
			 index, val);
		return;
	}

	write_cif_reg(base, reg->offset + csi_offset, val);
	v4l2_dbg(4, rkcif_debug, &dev->v4l2_dev,
		 "write reg[0x%x]:0x%x!!!\n",
		 reg->offset + csi_offset, val);
}
659
/*
 * Read-modify-write: OR @val into the register selected by @index,
 * honouring the per-CSI-host bank offset for MIPI/LVDS registers.
 */
void rkcif_write_register_or(struct rkcif_device *dev,
			     enum cif_reg_index index, u32 val)
{
	void __iomem *base = dev->hw_dev->base_addr;
	const struct cif_reg *reg = &dev->hw_dev->cif_regs[index];
	unsigned int reg_val = 0x0;
	int csi_offset = 0;

	if (dev->inf_id == RKCIF_MIPI_LVDS &&
	    index >= CIF_REG_MIPI_LVDS_ID0_CTRL0 &&
	    index <= CIF_REG_MIPI_ON_PAD) {
		switch (dev->chip_id) {
		case CHIP_RK3588_CIF:
			csi_offset = dev->csi_host_idx * 0x100;
			break;
		case CHIP_RV1106_CIF:
			csi_offset = dev->csi_host_idx * 0x200;
			break;
		case CHIP_RK3562_CIF:
			/* hosts 0..2 are 0x200 apart, host 3 sits at 0x500 */
			csi_offset = dev->csi_host_idx < 3 ?
				     dev->csi_host_idx * 0x200 : 0x500;
			break;
		default:
			break;
		}
	}

	if (index >= CIF_REG_INDEX_MAX)
		return;

	/* offset 0 marks an unsupported register (except DVP_CTRL itself) */
	if (index != CIF_REG_DVP_CTRL && reg->offset == 0x0) {
		v4l2_dbg(1, rkcif_debug, &dev->v4l2_dev,
			 "write reg[%d]:0x%x with OR failed, maybe useless!!!\n",
			 index, val);
		return;
	}

	reg_val = read_cif_reg(base, reg->offset + csi_offset);
	reg_val |= val;
	write_cif_reg(base, reg->offset + csi_offset, reg_val);
	v4l2_dbg(4, rkcif_debug, &dev->v4l2_dev,
		 "write or reg[0x%x]:0x%x!!!\n",
		 reg->offset + csi_offset, val);
}
698
/*
 * Read-modify-write: AND @val into the register selected by @index,
 * honouring the per-CSI-host bank offset for MIPI/LVDS registers.
 */
void rkcif_write_register_and(struct rkcif_device *dev,
			      enum cif_reg_index index, u32 val)
{
	unsigned int reg_val = 0x0;
	void __iomem *base = dev->hw_dev->base_addr;
	const struct cif_reg *reg = &dev->hw_dev->cif_regs[index];
	int csi_offset = 0;

	/* MIPI/LVDS register banks repeat per CSI host on these chips */
	if (dev->inf_id == RKCIF_MIPI_LVDS &&
	    index >= CIF_REG_MIPI_LVDS_ID0_CTRL0 &&
	    index <= CIF_REG_MIPI_ON_PAD) {
		if (dev->chip_id == CHIP_RK3588_CIF) {
			csi_offset = dev->csi_host_idx * 0x100;
		} else if (dev->chip_id == CHIP_RV1106_CIF) {
			csi_offset = dev->csi_host_idx * 0x200;
		} else if (dev->chip_id == CHIP_RK3562_CIF) {
			if (dev->csi_host_idx < 3)
				csi_offset = dev->csi_host_idx * 0x200;
			else
				csi_offset = 0x500;
		}
	}

	if (index < CIF_REG_INDEX_MAX) {
		if (index == CIF_REG_DVP_CTRL || reg->offset != 0x0) {
			reg_val = read_cif_reg(base, reg->offset + csi_offset);
			reg_val &= val;
			write_cif_reg(base, reg->offset + csi_offset, reg_val);
			v4l2_dbg(4, rkcif_debug, &dev->v4l2_dev,
				 "write and reg[0x%x]:0x%x!!!\n",
				 reg->offset + csi_offset, val);
		} else {
			/* message said "with OR" before: copy-paste from the OR helper */
			v4l2_dbg(1, rkcif_debug, &dev->v4l2_dev,
				 "write reg[%d]:0x%x with AND failed, maybe useless!!!\n",
				 index, val);
		}
	}
}
737
rkcif_read_register(struct rkcif_device * dev,enum cif_reg_index index)738 unsigned int rkcif_read_register(struct rkcif_device *dev,
739 enum cif_reg_index index)
740 {
741 unsigned int val = 0x0;
742 void __iomem *base = dev->hw_dev->base_addr;
743 const struct cif_reg *reg = &dev->hw_dev->cif_regs[index];
744 int csi_offset = 0;
745
746 if (dev->inf_id == RKCIF_MIPI_LVDS &&
747 index >= CIF_REG_MIPI_LVDS_ID0_CTRL0 &&
748 index <= CIF_REG_MIPI_ON_PAD) {
749 if (dev->chip_id == CHIP_RK3588_CIF) {
750 csi_offset = dev->csi_host_idx * 0x100;
751 } else if (dev->chip_id == CHIP_RV1106_CIF) {
752 csi_offset = dev->csi_host_idx * 0x200;
753 } else if (dev->chip_id == CHIP_RK3562_CIF) {
754 if (dev->csi_host_idx < 3)
755 csi_offset = dev->csi_host_idx * 0x200;
756 else
757 csi_offset = 0x500;
758 }
759 }
760
761 if (index < CIF_REG_INDEX_MAX) {
762 if (index == CIF_REG_DVP_CTRL || reg->offset != 0x0)
763 val = read_cif_reg(base, reg->offset + csi_offset);
764 else
765 v4l2_dbg(1, rkcif_debug, &dev->v4l2_dev,
766 "read reg[%d] failed, maybe useless!!!\n",
767 index);
768 }
769
770 return val;
771 }
772
/*
 * Write @val to a GRF register via regmap; only indices above
 * CIF_REG_DVP_CTRL map to GRF space. No-op if the grf regmap is missing.
 */
void rkcif_write_grf_reg(struct rkcif_device *dev,
			 enum cif_reg_index index, u32 val)
{
	struct rkcif_hw *cif_hw = dev->hw_dev;
	const struct cif_reg *reg = &cif_hw->cif_regs[index];

	if (index >= CIF_REG_INDEX_MAX)
		return;

	if (index <= CIF_REG_DVP_CTRL) {
		v4l2_dbg(1, rkcif_debug, &dev->v4l2_dev,
			 "write reg[%d]:0x%x failed, maybe useless!!!\n",
			 index, val);
		return;
	}

	if (!IS_ERR(cif_hw->grf))
		regmap_write(cif_hw->grf, reg->offset, val);
}
790
rkcif_read_grf_reg(struct rkcif_device * dev,enum cif_reg_index index)791 u32 rkcif_read_grf_reg(struct rkcif_device *dev, enum cif_reg_index index)
792 {
793 struct rkcif_hw *cif_hw = dev->hw_dev;
794 const struct cif_reg *reg = &cif_hw->cif_regs[index];
795 u32 val = 0xffff;
796
797 if (index < CIF_REG_INDEX_MAX) {
798 if (index > CIF_REG_DVP_CTRL) {
799 if (!IS_ERR(cif_hw->grf))
800 regmap_read(cif_hw->grf, reg->offset, &val);
801 } else {
802 v4l2_dbg(1, rkcif_debug, &dev->v4l2_dev,
803 "read reg[%d] failed, maybe useless!!!\n",
804 index);
805 }
806 }
807
808 return val;
809 }
810
/*
 * Select single- vs dual-edge DVP pixel-clock sampling in the GRF.
 * RK3568 uses CIFIO_CON1; the other supported chips use CIFIO_CON.
 */
void rkcif_enable_dvp_clk_dual_edge(struct rkcif_device *dev, bool on)
{
	struct rkcif_hw *cif_hw = dev->hw_dev;
	u32 val = 0x0;

	if (!IS_ERR(cif_hw->grf)) {
		switch (dev->chip_id) {
		case CHIP_RK3568_CIF:
			val = on ? RK3568_CIF_PCLK_DUAL_EDGE :
				   RK3568_CIF_PCLK_SINGLE_EDGE;
			rkcif_write_grf_reg(dev, CIF_REG_GRF_CIFIO_CON1, val);
			break;
		case CHIP_RV1126_CIF:
			val = on ? CIF_SAMPLING_EDGE_DOUBLE :
				   CIF_SAMPLING_EDGE_SINGLE;
			rkcif_write_grf_reg(dev, CIF_REG_GRF_CIFIO_CON, val);
			break;
		case CHIP_RK3588_CIF:
			val = on ? RK3588_CIF_PCLK_DUAL_EDGE :
				   RK3588_CIF_PCLK_SINGLE_EDGE;
			rkcif_write_grf_reg(dev, CIF_REG_GRF_CIFIO_CON, val);
			break;
		case CHIP_RV1106_CIF:
			val = on ? RV1106_CIF_PCLK_DUAL_EDGE :
				   RV1106_CIF_PCLK_SINGLE_EDGE;
			rkcif_write_grf_reg(dev, CIF_REG_GRF_CIFIO_CON, val);
			break;
		default:
			break;
		}
	}

	v4l2_info(&dev->v4l2_dev,
		  "set dual edge mode(%s,0x%x)!!!\n", on ? "on" : "off", val);
}
848
rkcif_config_dvp_clk_sampling_edge(struct rkcif_device * dev,enum rkcif_clk_edge edge)849 void rkcif_config_dvp_clk_sampling_edge(struct rkcif_device *dev,
850 enum rkcif_clk_edge edge)
851 {
852 struct rkcif_hw *cif_hw = dev->hw_dev;
853 u32 val = 0x0;
854
855 if (!IS_ERR(cif_hw->grf)) {
856 if (dev->chip_id == CHIP_RV1126_CIF) {
857 if (edge == RKCIF_CLK_RISING)
858 val = CIF_PCLK_SAMPLING_EDGE_RISING;
859 else
860 val = CIF_PCLK_SAMPLING_EDGE_FALLING;
861 }
862
863 if (dev->chip_id == CHIP_RK3568_CIF) {
864 if (edge == RKCIF_CLK_RISING)
865 val = RK3568_CIF_PCLK_SAMPLING_EDGE_RISING;
866 else
867 val = RK3568_CIF_PCLK_SAMPLING_EDGE_FALLING;
868 }
869
870 if (dev->chip_id == CHIP_RK3588_CIF) {
871 if (edge == RKCIF_CLK_RISING)
872 val = RK3588_CIF_PCLK_SAMPLING_EDGE_RISING;
873 else
874 val = RK3588_CIF_PCLK_SAMPLING_EDGE_FALLING;
875 }
876 if (dev->chip_id == CHIP_RV1106_CIF) {
877 if (dev->dphy_hw) {
878 if (edge == RKCIF_CLK_RISING)
879 val = RV1106_CIF_PCLK_EDGE_RISING_M0;
880 else
881 val = RV1106_CIF_PCLK_EDGE_FALLING_M0;
882 } else {
883 if (edge == RKCIF_CLK_RISING)
884 val = RV1106_CIF_PCLK_EDGE_RISING_M1;
885 else
886 val = RV1106_CIF_PCLK_EDGE_FALLING_M1;
887 rkcif_write_grf_reg(dev, CIF_REG_GRF_CIFIO_VENC, val);
888 return;
889 }
890 }
891 rkcif_write_grf_reg(dev, CIF_REG_GRF_CIFIO_CON, val);
892 }
893 }
894
/*
 * Route the DVP pins: with a TTL-capable dphy select M0 and toggle the
 * dphy's TTL mode per @on, otherwise fall back to the M1 grf routing.
 */
void rkcif_config_dvp_pin(struct rkcif_device *dev, bool on)
{
	if (!dev->dphy_hw || !dev->dphy_hw->ttl_mode_enable ||
	    !dev->dphy_hw->ttl_mode_disable) {
		rkcif_write_grf_reg(dev, CIF_REG_GRF_CIFIO_CON, RV1106_CIF_GRF_SEL_M1);
		return;
	}

	rkcif_write_grf_reg(dev, CIF_REG_GRF_CIFIO_CON, RV1106_CIF_GRF_SEL_M0);
	if (on)
		dev->dphy_hw->ttl_mode_enable(dev->dphy_hw);
	else
		dev->dphy_hw->ttl_mode_disable(dev->dphy_hw);
}
907
908 /**************************** pipeline operations *****************************/
__cif_pipeline_prepare(struct rkcif_pipeline * p,struct media_entity * me)909 static int __cif_pipeline_prepare(struct rkcif_pipeline *p,
910 struct media_entity *me)
911 {
912 struct v4l2_subdev *sd;
913 int i;
914
915 p->num_subdevs = 0;
916 memset(p->subdevs, 0, sizeof(p->subdevs));
917
918 while (1) {
919 struct media_pad *pad = NULL;
920
921 /* Find remote source pad */
922 for (i = 0; i < me->num_pads; i++) {
923 struct media_pad *spad = &me->pads[i];
924
925 if (!(spad->flags & MEDIA_PAD_FL_SINK))
926 continue;
927 pad = media_entity_remote_pad(spad);
928 if (pad)
929 break;
930 }
931
932 if (!pad)
933 break;
934
935 sd = media_entity_to_v4l2_subdev(pad->entity);
936 p->subdevs[p->num_subdevs++] = sd;
937 me = &sd->entity;
938 if (me->num_pads == 1)
939 break;
940 }
941
942 return 0;
943 }
944
/* Placeholder for pipeline clock setup; currently nothing to configure. */
static int __cif_pipeline_s_cif_clk(struct rkcif_pipeline *p)
{
	return 0;
}
949
/*
 * Power up the pipeline. Only the first opener (power_cnt 0 -> 1) does the
 * graph walk and clock setup; later openers just bump the refcount.
 *
 * NOTE(review): on the error paths below power_cnt stays incremented, so a
 * failed open leaves the count unbalanced unless the caller still invokes
 * rkcif_pipeline_close() — confirm callers do.
 */
static int rkcif_pipeline_open(struct rkcif_pipeline *p,
			       struct media_entity *me,
			       bool prepare)
{
	int ret;

	if (WARN_ON(!p || !me))
		return -EINVAL;
	if (atomic_inc_return(&p->power_cnt) > 1)
		return 0;

	/* go through media graphic and get subdevs */
	if (prepare)
		__cif_pipeline_prepare(p, me);

	if (!p->num_subdevs)
		return -EINVAL;

	ret = __cif_pipeline_s_cif_clk(p);
	if (ret < 0)
		return ret;

	return 0;
}
974
/* Drop one pipeline power reference (balances rkcif_pipeline_open()). */
static int rkcif_pipeline_close(struct rkcif_pipeline *p)
{
	atomic_dec_return(&p->power_cnt);

	return 0;
}
981
rkcif_set_sensor_streamon_in_sync_mode(struct rkcif_device * cif_dev)982 static void rkcif_set_sensor_streamon_in_sync_mode(struct rkcif_device *cif_dev)
983 {
984 struct rkcif_hw *hw = cif_dev->hw_dev;
985 struct rkcif_device *dev = NULL;
986 int i = 0, j = 0;
987 int on = 1;
988 int ret = 0;
989 bool is_streaming = false;
990 struct rkcif_multi_sync_config *sync_config;
991
992 if (!cif_dev->sync_cfg.type)
993 return;
994
995 mutex_lock(&hw->dev_lock);
996 sync_config = &hw->sync_config[cif_dev->sync_cfg.group];
997 sync_config->streaming_cnt++;
998 if (sync_config->streaming_cnt < sync_config->dev_cnt) {
999 mutex_unlock(&hw->dev_lock);
1000 return;
1001 }
1002
1003 if (sync_config->mode == RKCIF_MASTER_MASTER ||
1004 sync_config->mode == RKCIF_MASTER_SLAVE) {
1005 for (i = 0; i < sync_config->slave.count; i++) {
1006 dev = sync_config->slave.cif_dev[i];
1007 is_streaming = sync_config->slave.is_streaming[i];
1008 if (!is_streaming) {
1009 if (dev->sditf_cnt == 1) {
1010 ret = v4l2_subdev_call(dev->terminal_sensor.sd, core, ioctl,
1011 RKMODULE_SET_QUICK_STREAM, &on);
1012 if (ret)
1013 dev_info(dev->dev,
1014 "set RKMODULE_SET_QUICK_STREAM failed\n");
1015 } else {
1016 for (j = 0; j < dev->sditf_cnt; j++)
1017 ret |= v4l2_subdev_call(dev->sditf[j]->sensor_sd,
1018 core,
1019 ioctl,
1020 RKMODULE_SET_QUICK_STREAM,
1021 &on);
1022 if (ret)
1023 dev_info(dev->dev,
1024 "set RKMODULE_SET_QUICK_STREAM failed\n");
1025 }
1026 sync_config->slave.is_streaming[i] = true;
1027 }
1028 v4l2_dbg(3, rkcif_debug, &dev->v4l2_dev,
1029 "quick stream in sync mode, slave_dev[%d]\n", i);
1030
1031 }
1032 for (i = 0; i < sync_config->ext_master.count; i++) {
1033 dev = sync_config->ext_master.cif_dev[i];
1034 is_streaming = sync_config->ext_master.is_streaming[i];
1035 if (!is_streaming) {
1036 if (dev->sditf_cnt == 1) {
1037 ret = v4l2_subdev_call(dev->terminal_sensor.sd, core, ioctl,
1038 RKMODULE_SET_QUICK_STREAM, &on);
1039 if (ret)
1040 dev_info(dev->dev,
1041 "set RKMODULE_SET_QUICK_STREAM failed\n");
1042 } else {
1043 for (j = 0; j < dev->sditf_cnt; j++)
1044 ret |= v4l2_subdev_call(dev->sditf[j]->sensor_sd,
1045 core,
1046 ioctl,
1047 RKMODULE_SET_QUICK_STREAM,
1048 &on);
1049 if (ret)
1050 dev_info(dev->dev,
1051 "set RKMODULE_SET_QUICK_STREAM failed\n");
1052 }
1053 sync_config->ext_master.is_streaming[i] = true;
1054 }
1055 v4l2_dbg(3, rkcif_debug, &dev->v4l2_dev,
1056 "quick stream in sync mode, ext_master_dev[%d]\n", i);
1057 }
1058 for (i = 0; i < sync_config->int_master.count; i++) {
1059 dev = sync_config->int_master.cif_dev[i];
1060 is_streaming = sync_config->int_master.is_streaming[i];
1061 if (!is_streaming) {
1062 if (dev->sditf_cnt == 1) {
1063 ret = v4l2_subdev_call(dev->terminal_sensor.sd, core, ioctl,
1064 RKMODULE_SET_QUICK_STREAM, &on);
1065 if (ret)
1066 dev_info(hw->dev,
1067 "set RKMODULE_SET_QUICK_STREAM failed\n");
1068 } else {
1069 for (j = 0; j < dev->sditf_cnt; j++)
1070 ret |= v4l2_subdev_call(dev->sditf[j]->sensor_sd,
1071 core,
1072 ioctl,
1073 RKMODULE_SET_QUICK_STREAM,
1074 &on);
1075 if (ret)
1076 dev_info(dev->dev,
1077 "set RKMODULE_SET_QUICK_STREAM failed\n");
1078 }
1079 sync_config->int_master.is_streaming[i] = true;
1080 }
1081 v4l2_dbg(3, rkcif_debug, &dev->v4l2_dev,
1082 "quick stream in sync mode, int_master_dev[%d]\n", i);
1083 }
1084 }
1085 mutex_unlock(&hw->dev_lock);
1086 }
1087
rkcif_sensor_streaming_cb(void * data)1088 static void rkcif_sensor_streaming_cb(void *data)
1089 {
1090 struct v4l2_subdev *subdevs = (struct v4l2_subdev *)data;
1091
1092 v4l2_subdev_call(subdevs, video, s_stream, 1);
1093 }
1094
1095 /*
1096 * stream-on order: isp_subdev, mipi dphy, sensor
1097 * stream-off order: mipi dphy, sensor, isp_subdev
1098 */
rkcif_pipeline_set_stream(struct rkcif_pipeline * p,bool on)1099 static int rkcif_pipeline_set_stream(struct rkcif_pipeline *p, bool on)
1100 {
1101 struct rkcif_device *cif_dev = container_of(p, struct rkcif_device, pipe);
1102 bool can_be_set = false;
1103 int i, ret = 0;
1104
1105 if (cif_dev->hdr.hdr_mode == NO_HDR || cif_dev->hdr.hdr_mode == HDR_COMPR) {
1106 if ((on && atomic_inc_return(&p->stream_cnt) > 1) ||
1107 (!on && atomic_dec_return(&p->stream_cnt) > 0))
1108 return 0;
1109
1110 if (on) {
1111 rockchip_set_system_status(SYS_STATUS_CIF0);
1112 cif_dev->irq_stats.csi_overflow_cnt = 0;
1113 cif_dev->irq_stats.csi_bwidth_lack_cnt = 0;
1114 cif_dev->irq_stats.dvp_bus_err_cnt = 0;
1115 cif_dev->irq_stats.dvp_line_err_cnt = 0;
1116 cif_dev->irq_stats.dvp_overflow_cnt = 0;
1117 cif_dev->irq_stats.dvp_pix_err_cnt = 0;
1118 cif_dev->irq_stats.all_err_cnt = 0;
1119 cif_dev->irq_stats.csi_size_err_cnt = 0;
1120 cif_dev->irq_stats.dvp_size_err_cnt = 0;
1121 cif_dev->irq_stats.dvp_bwidth_lack_cnt = 0;
1122 cif_dev->irq_stats.frm_end_cnt[0] = 0;
1123 cif_dev->irq_stats.frm_end_cnt[1] = 0;
1124 cif_dev->irq_stats.frm_end_cnt[2] = 0;
1125 cif_dev->irq_stats.frm_end_cnt[3] = 0;
1126 cif_dev->irq_stats.not_active_buf_cnt[0] = 0;
1127 cif_dev->irq_stats.not_active_buf_cnt[1] = 0;
1128 cif_dev->irq_stats.not_active_buf_cnt[2] = 0;
1129 cif_dev->irq_stats.not_active_buf_cnt[3] = 0;
1130 cif_dev->irq_stats.trig_simult_cnt[0] = 0;
1131 cif_dev->irq_stats.trig_simult_cnt[1] = 0;
1132 cif_dev->irq_stats.trig_simult_cnt[2] = 0;
1133 cif_dev->irq_stats.trig_simult_cnt[3] = 0;
1134 cif_dev->reset_watchdog_timer.is_triggered = false;
1135 cif_dev->reset_watchdog_timer.is_running = false;
1136 cif_dev->err_state_work.last_timestamp = 0;
1137 for (i = 0; i < cif_dev->num_channels; i++)
1138 cif_dev->reset_watchdog_timer.last_buf_wakeup_cnt[i] = 0;
1139 cif_dev->reset_watchdog_timer.run_cnt = 0;
1140 }
1141
1142 /* phy -> sensor */
1143 for (i = 0; i < p->num_subdevs; i++) {
1144 if (p->subdevs[i] == cif_dev->terminal_sensor.sd &&
1145 on &&
1146 cif_dev->is_thunderboot &&
1147 !rk_tb_mcu_is_done()) {
1148 cif_dev->tb_client.data = p->subdevs[i];
1149 cif_dev->tb_client.cb = rkcif_sensor_streaming_cb;
1150 rk_tb_client_register_cb(&cif_dev->tb_client);
1151 } else {
1152 ret = v4l2_subdev_call(p->subdevs[i], video, s_stream, on);
1153 }
1154 if (on && ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
1155 goto err_stream_off;
1156 }
1157
1158 if (cif_dev->sditf_cnt > 1) {
1159 for (i = 0; i < cif_dev->sditf_cnt; i++) {
1160 ret = v4l2_subdev_call(cif_dev->sditf[i]->sensor_sd,
1161 video,
1162 s_stream,
1163 on);
1164 if (on && ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
1165 goto err_stream_off;
1166 }
1167 }
1168
1169 if (on)
1170 rkcif_set_sensor_streamon_in_sync_mode(cif_dev);
1171 } else {
1172 if (!on && atomic_dec_return(&p->stream_cnt) > 0)
1173 return 0;
1174
1175 if (on) {
1176 atomic_inc(&p->stream_cnt);
1177 if (cif_dev->hdr.hdr_mode == HDR_X2) {
1178 if (atomic_read(&p->stream_cnt) == 1) {
1179 rockchip_set_system_status(SYS_STATUS_CIF0);
1180 can_be_set = false;
1181 } else if (atomic_read(&p->stream_cnt) == 2) {
1182 can_be_set = true;
1183 }
1184 } else if (cif_dev->hdr.hdr_mode == HDR_X3) {
1185 if (atomic_read(&p->stream_cnt) == 1) {
1186 rockchip_set_system_status(SYS_STATUS_CIF0);
1187 can_be_set = false;
1188 } else if (atomic_read(&p->stream_cnt) == 3) {
1189 can_be_set = true;
1190 }
1191 }
1192 }
1193
1194 if ((on && can_be_set) || !on) {
1195 if (on) {
1196 cif_dev->irq_stats.csi_overflow_cnt = 0;
1197 cif_dev->irq_stats.csi_bwidth_lack_cnt = 0;
1198 cif_dev->irq_stats.dvp_bus_err_cnt = 0;
1199 cif_dev->irq_stats.dvp_line_err_cnt = 0;
1200 cif_dev->irq_stats.dvp_overflow_cnt = 0;
1201 cif_dev->irq_stats.dvp_pix_err_cnt = 0;
1202 cif_dev->irq_stats.dvp_bwidth_lack_cnt = 0;
1203 cif_dev->irq_stats.all_err_cnt = 0;
1204 cif_dev->irq_stats.csi_size_err_cnt = 0;
1205 cif_dev->irq_stats.dvp_size_err_cnt = 0;
1206 cif_dev->irq_stats.frm_end_cnt[0] = 0;
1207 cif_dev->irq_stats.frm_end_cnt[1] = 0;
1208 cif_dev->irq_stats.frm_end_cnt[2] = 0;
1209 cif_dev->irq_stats.frm_end_cnt[3] = 0;
1210 cif_dev->irq_stats.not_active_buf_cnt[0] = 0;
1211 cif_dev->irq_stats.not_active_buf_cnt[1] = 0;
1212 cif_dev->irq_stats.not_active_buf_cnt[2] = 0;
1213 cif_dev->irq_stats.not_active_buf_cnt[3] = 0;
1214 cif_dev->irq_stats.trig_simult_cnt[0] = 0;
1215 cif_dev->irq_stats.trig_simult_cnt[1] = 0;
1216 cif_dev->irq_stats.trig_simult_cnt[2] = 0;
1217 cif_dev->irq_stats.trig_simult_cnt[3] = 0;
1218 cif_dev->is_start_hdr = true;
1219 cif_dev->reset_watchdog_timer.is_triggered = false;
1220 cif_dev->reset_watchdog_timer.is_running = false;
1221 for (i = 0; i < cif_dev->num_channels; i++)
1222 cif_dev->reset_watchdog_timer.last_buf_wakeup_cnt[i] = 0;
1223 cif_dev->reset_watchdog_timer.run_cnt = 0;
1224 }
1225
1226 /* phy -> sensor */
1227 for (i = 0; i < p->num_subdevs; i++) {
1228 if (p->subdevs[i] == cif_dev->terminal_sensor.sd &&
1229 on &&
1230 cif_dev->is_thunderboot &&
1231 !rk_tb_mcu_is_done()) {
1232 cif_dev->tb_client.data = p->subdevs[i];
1233 cif_dev->tb_client.cb = rkcif_sensor_streaming_cb;
1234 rk_tb_client_register_cb(&cif_dev->tb_client);
1235 } else {
1236 ret = v4l2_subdev_call(p->subdevs[i], video, s_stream, on);
1237 }
1238 if (on && ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
1239 goto err_stream_off;
1240 }
1241 if (cif_dev->sditf_cnt > 1) {
1242 for (i = 0; i < cif_dev->sditf_cnt; i++) {
1243 ret = v4l2_subdev_call(cif_dev->sditf[i]->sensor_sd,
1244 video,
1245 s_stream,
1246 on);
1247 if (on && ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
1248 goto err_stream_off;
1249 }
1250 }
1251
1252 if (on)
1253 rkcif_set_sensor_streamon_in_sync_mode(cif_dev);
1254 }
1255 }
1256
1257 if (!on)
1258 rockchip_clear_system_status(SYS_STATUS_CIF0);
1259
1260 return 0;
1261
1262 err_stream_off:
1263 for (--i; i >= 0; --i)
1264 v4l2_subdev_call(p->subdevs[i], video, s_stream, false);
1265 rockchip_clear_system_status(SYS_STATUS_CIF0);
1266 return ret;
1267 }
1268
/*
 * rkcif_create_link - create media pad links from one sensor (or its
 * internal lvds helper subdev, for CCP2 sensors) to this device's stream,
 * scale and tools video nodes.
 *
 * @dev: cif device owning the video nodes
 * @sensor: sensor to link from
 * @stream_num: number of stream video nodes to link
 * @mipi_lvds_linked: in/out flag; once a mipi/lvds sensor has been linked
 *	with MEDIA_LNK_FL_ENABLED, later sensors get disabled links only.
 *
 * Returns 0 or the last media_create_pad_link() error.
 */
static int rkcif_create_link(struct rkcif_device *dev,
			     struct rkcif_sensor_info *sensor,
			     u32 stream_num,
			     bool *mipi_lvds_linked)
{
	struct rkcif_sensor_info linked_sensor;
	struct media_entity *source_entity, *sink_entity;
	int ret = 0;
	u32 flags, pad, id;
	int pad_offset = 0;

	/* rk3588 and newer have 4 extra pads before the tools channels */
	if (dev->chip_id >= CHIP_RK3588_CIF)
		pad_offset = 4;

	linked_sensor.lanes = sensor->lanes;

	/* CCP2 (lvds) sensors are linked through the internal lvds subdev */
	if (sensor->mbus.type == V4L2_MBUS_CCP2) {
		linked_sensor.sd = &dev->lvds_subdev.sd;
		dev->lvds_subdev.sensor_self.sd = &dev->lvds_subdev.sd;
		dev->lvds_subdev.sensor_self.lanes = sensor->lanes;
		memcpy(&dev->lvds_subdev.sensor_self.mbus, &sensor->mbus,
		       sizeof(struct v4l2_mbus_config));
	} else {
		linked_sensor.sd = sensor->sd;
	}

	memcpy(&linked_sensor.mbus, &sensor->mbus,
	       sizeof(struct v4l2_mbus_config));

	/* walk every source pad of the (possibly substituted) sensor */
	for (pad = 0; pad < linked_sensor.sd->entity.num_pads; pad++) {
		if (linked_sensor.sd->entity.pads[pad].flags &
			MEDIA_PAD_FL_SOURCE) {
			/*
			 * NOTE(review): pad < num_pads always holds inside
			 * this loop, so this branch is unreachable dead
			 * code — confirm original intent.
			 */
			if (pad == linked_sensor.sd->entity.num_pads) {
				dev_err(dev->dev,
					"failed to find src pad for %s\n",
					linked_sensor.sd->name);

				break;
			}

			/* rk1808 dvp: single CIF stream node, then done */
			if ((linked_sensor.mbus.type == V4L2_MBUS_BT656 ||
			     linked_sensor.mbus.type == V4L2_MBUS_PARALLEL) &&
			    (dev->chip_id == CHIP_RK1808_CIF)) {
				source_entity = &linked_sensor.sd->entity;
				sink_entity = &dev->stream[RKCIF_STREAM_CIF].vnode.vdev.entity;

				ret = media_create_pad_link(source_entity,
							    pad,
							    sink_entity,
							    0,
							    MEDIA_LNK_FL_ENABLED);
				if (ret)
					dev_err(dev->dev, "failed to create link for %s\n",
						linked_sensor.sd->name);
				break;
			}

			/* rv1126 and newer dvp: one stream node per source pad */
			if ((linked_sensor.mbus.type == V4L2_MBUS_BT656 ||
			     linked_sensor.mbus.type == V4L2_MBUS_PARALLEL) &&
			    (dev->chip_id >= CHIP_RV1126_CIF)) {
				source_entity = &linked_sensor.sd->entity;
				sink_entity = &dev->stream[pad].vnode.vdev.entity;

				ret = media_create_pad_link(source_entity,
							    pad,
							    sink_entity,
							    0,
							    MEDIA_LNK_FL_ENABLED);
				if (ret)
					dev_err(dev->dev, "failed to create link for %s pad[%d]\n",
						linked_sensor.sd->name, pad);
				continue;
			}

			/* mipi/lvds: fan out this source pad to all stream nodes */
			for (id = 0; id < stream_num; id++) {
				source_entity = &linked_sensor.sd->entity;
				sink_entity = &dev->stream[id].vnode.vdev.entity;

				/* only the first mipi/lvds sensor gets enabled links */
				if ((dev->chip_id < CHIP_RK1808_CIF) ||
				    (id == pad - 1 && !(*mipi_lvds_linked)))
					flags = MEDIA_LNK_FL_ENABLED;
				else
					flags = 0;

				ret = media_create_pad_link(source_entity,
							    pad,
							    sink_entity,
							    0,
							    flags);
				if (ret) {
					dev_err(dev->dev,
						"failed to create link for %s\n",
						linked_sensor.sd->name);
					break;
				}
			}
			/* rk3588 and newer also expose scale video nodes */
			if (dev->chip_id >= CHIP_RK3588_CIF) {
				for (id = 0; id < stream_num; id++) {
					source_entity = &linked_sensor.sd->entity;
					sink_entity = &dev->scale_vdev[id].vnode.vdev.entity;

					if ((id + stream_num) == pad - 1 && !(*mipi_lvds_linked))
						flags = MEDIA_LNK_FL_ENABLED;
					else
						flags = 0;

					ret = media_create_pad_link(source_entity,
								    pad,
								    sink_entity,
								    0,
								    flags);
					if (ret) {
						dev_err(dev->dev,
							"failed to create link for %s\n",
							linked_sensor.sd->name);
						break;
					}
				}
			}
			/* chips after rk1808 expose tools (debug capture) nodes */
			if (dev->chip_id > CHIP_RK1808_CIF) {
				for (id = 0; id < RKCIF_MAX_TOOLS_CH; id++) {
					source_entity = &linked_sensor.sd->entity;
					sink_entity = &dev->tools_vdev[id].vnode.vdev.entity;

					if ((id + stream_num + pad_offset) == pad - 1 && !(*mipi_lvds_linked))
						flags = MEDIA_LNK_FL_ENABLED;
					else
						flags = 0;

					ret = media_create_pad_link(source_entity,
								    pad,
								    sink_entity,
								    0,
								    flags);
					if (ret) {
						dev_err(dev->dev,
							"failed to create link for %s\n",
							linked_sensor.sd->name);
						break;
					}
				}
			}
		}
	}

	/* for CCP2, also link the real sensor into the lvds helper subdev */
	if (sensor->mbus.type == V4L2_MBUS_CCP2) {
		source_entity = &sensor->sd->entity;
		sink_entity = &linked_sensor.sd->entity;
		ret = media_create_pad_link(source_entity,
					    1,
					    sink_entity,
					    0,
					    MEDIA_LNK_FL_ENABLED);
		if (ret)
			dev_err(dev->dev, "failed to create link between %s and %s\n",
				linked_sensor.sd->name,
				sensor->sd->name);
	}

	/* remember that a mipi/lvds sensor already owns the enabled links */
	if (linked_sensor.mbus.type != V4L2_MBUS_BT656 &&
	    linked_sensor.mbus.type != V4L2_MBUS_PARALLEL)
		*mipi_lvds_linked = true;
	return ret;
}
1433
1434 /***************************** media controller *******************************/
rkcif_create_links(struct rkcif_device * dev)1435 static int rkcif_create_links(struct rkcif_device *dev)
1436 {
1437 u32 s = 0;
1438 u32 stream_num = 0;
1439 bool mipi_lvds_linked = false;
1440
1441 if (dev->chip_id < CHIP_RV1126_CIF) {
1442 if (dev->inf_id == RKCIF_MIPI_LVDS)
1443 stream_num = RKCIF_MAX_STREAM_MIPI;
1444 else
1445 stream_num = RKCIF_SINGLE_STREAM;
1446 } else {
1447 stream_num = RKCIF_MAX_STREAM_MIPI;
1448 }
1449
1450 /* sensor links(or mipi-phy) */
1451 for (s = 0; s < dev->num_sensors; ++s) {
1452 struct rkcif_sensor_info *sensor = &dev->sensors[s];
1453
1454 rkcif_create_link(dev, sensor, stream_num, &mipi_lvds_linked);
1455 }
1456
1457 return 0;
1458 }
1459
/* Apply the driver default format to the whole pipeline; always succeeds. */
static int _set_pipeline_default_fmt(struct rkcif_device *dev)
{
	rkcif_set_default_fmt(dev);

	return 0;
}
1465
subdev_asyn_register_itf(struct rkcif_device * dev)1466 static int subdev_asyn_register_itf(struct rkcif_device *dev)
1467 {
1468 struct sditf_priv *sditf = NULL;
1469 int ret = 0;
1470
1471 ret = rkcif_update_sensor_info(&dev->stream[0]);
1472 if (ret) {
1473 v4l2_err(&dev->v4l2_dev,
1474 "There is not terminal subdev, not synchronized with ISP\n");
1475 return 0;
1476 }
1477 sditf = dev->sditf[0];
1478 if (sditf && (!sditf->is_combine_mode) && (!dev->is_notifier_isp)) {
1479 ret = v4l2_async_register_subdev_sensor_common(&sditf->sd);
1480 dev->is_notifier_isp = true;
1481 }
1482
1483 return ret;
1484 }
1485
/*
 * Async notifier .complete: all sensor subdevs are bound. Query each
 * sensor's mbus config, derive the lane count, register the lvds/dvp
 * helper subdevs where needed, create the media links, register device
 * nodes, apply default formats, and finally wake the isp-notifier thread.
 *
 * Returns 0 on success or a negative errno from one of the setup steps.
 */
static int subdev_notifier_complete(struct v4l2_async_notifier *notifier)
{
	struct rkcif_device *dev;
	struct rkcif_sensor_info *sensor;
	struct v4l2_subdev *sd;
	struct v4l2_device *v4l2_dev = NULL;
	int ret, index;

	dev = container_of(notifier, struct rkcif_device, notifier);

	v4l2_dev = &dev->v4l2_dev;

	for (index = 0; index < dev->num_sensors; index++) {
		sensor = &dev->sensors[index];

		/* refresh the mbus config now that the subdev is fully bound */
		list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
			if (sd->ops) {
				if (sd == sensor->sd) {
					ret = v4l2_subdev_call(sd,
							       pad,
							       get_mbus_config,
							       0,
							       &sensor->mbus);
					if (ret)
						v4l2_err(v4l2_dev,
							 "get mbus config failed for linking\n");
				}
			}
		}

		/*
		 * Derive the lane count from the legacy V4L2_MBUS_CSI2_*_LANE
		 * flags; sensors that report nothing default to 1 lane.
		 */
		if (sensor->mbus.type == V4L2_MBUS_CCP2 ||
		    sensor->mbus.type == V4L2_MBUS_CSI2_DPHY ||
		    sensor->mbus.type == V4L2_MBUS_CSI2_CPHY) {

			switch (sensor->mbus.flags & V4L2_MBUS_CSI2_LANES) {
			case V4L2_MBUS_CSI2_1_LANE:
				sensor->lanes = 1;
				break;
			case V4L2_MBUS_CSI2_2_LANE:
				sensor->lanes = 2;
				break;
			case V4L2_MBUS_CSI2_3_LANE:
				sensor->lanes = 3;
				break;
			case V4L2_MBUS_CSI2_4_LANE:
				sensor->lanes = 4;
				break;
			default:
				sensor->lanes = 1;
			}
		}

		/* CCP2 (lvds) needs the internal lvds helper subdev */
		if (sensor->mbus.type == V4L2_MBUS_CCP2) {
			ret = rkcif_register_lvds_subdev(dev);
			if (ret < 0) {
				v4l2_err(&dev->v4l2_dev,
					 "Err: register lvds subdev failed!!!\n");
				goto notifier_end;
			}
			break;
		}

		/* parallel/bt656 (dvp) needs the sof helper subdev */
		if (sensor->mbus.type == V4L2_MBUS_PARALLEL ||
		    sensor->mbus.type == V4L2_MBUS_BT656) {
			ret = rkcif_register_dvp_sof_subdev(dev);
			if (ret < 0) {
				v4l2_err(&dev->v4l2_dev,
					 "Err: register dvp sof subdev failed!!!\n");
				goto notifier_end;
			}
			break;
		}
	}

	ret = rkcif_create_links(dev);
	if (ret < 0)
		goto unregister_lvds;

	ret = v4l2_device_register_subdev_nodes(&dev->v4l2_dev);
	if (ret < 0)
		goto unregister_lvds;

	ret = _set_pipeline_default_fmt(dev);
	if (ret < 0)
		goto unregister_lvds;

	/* release notifier_isp_thread waiting on this completion */
	if (!completion_done(&dev->cmpl_ntf))
		complete(&dev->cmpl_ntf);
	v4l2_info(&dev->v4l2_dev, "Async subdev notifier completed\n");

	return ret;

unregister_lvds:
	rkcif_unregister_lvds_subdev(dev);
	rkcif_unregister_dvp_sof_subdev(dev);
notifier_end:
	return ret;
}
1584
/* Per-endpoint bookkeeping parsed from fwnode by rkcif_fwnode_parse(). */
struct rkcif_async_subdev {
	struct v4l2_async_subdev asd;	/* base async subdev (container_of anchor) */
	struct v4l2_mbus_config mbus;	/* endpoint bus type and flags */
	int lanes;			/* csi2/csi1 data lane count */
};
1590
subdev_notifier_bound(struct v4l2_async_notifier * notifier,struct v4l2_subdev * subdev,struct v4l2_async_subdev * asd)1591 static int subdev_notifier_bound(struct v4l2_async_notifier *notifier,
1592 struct v4l2_subdev *subdev,
1593 struct v4l2_async_subdev *asd)
1594 {
1595 struct rkcif_device *cif_dev = container_of(notifier,
1596 struct rkcif_device, notifier);
1597 struct rkcif_async_subdev *s_asd = container_of(asd,
1598 struct rkcif_async_subdev, asd);
1599
1600 if (cif_dev->num_sensors == ARRAY_SIZE(cif_dev->sensors)) {
1601 v4l2_err(&cif_dev->v4l2_dev,
1602 "%s: the num of subdev is beyond %d\n",
1603 __func__, cif_dev->num_sensors);
1604 return -EBUSY;
1605 }
1606
1607 cif_dev->sensors[cif_dev->num_sensors].lanes = s_asd->lanes;
1608 cif_dev->sensors[cif_dev->num_sensors].mbus = s_asd->mbus;
1609 cif_dev->sensors[cif_dev->num_sensors].sd = subdev;
1610 ++cif_dev->num_sensors;
1611
1612 v4l2_err(subdev, "Async registered subdev\n");
1613
1614 return 0;
1615 }
1616
rkcif_fwnode_parse(struct device * dev,struct v4l2_fwnode_endpoint * vep,struct v4l2_async_subdev * asd)1617 static int rkcif_fwnode_parse(struct device *dev,
1618 struct v4l2_fwnode_endpoint *vep,
1619 struct v4l2_async_subdev *asd)
1620 {
1621 struct rkcif_async_subdev *rk_asd =
1622 container_of(asd, struct rkcif_async_subdev, asd);
1623 struct v4l2_fwnode_bus_parallel *bus = &vep->bus.parallel;
1624
1625 if (vep->bus_type != V4L2_MBUS_BT656 &&
1626 vep->bus_type != V4L2_MBUS_PARALLEL &&
1627 vep->bus_type != V4L2_MBUS_CSI2_DPHY &&
1628 vep->bus_type != V4L2_MBUS_CSI2_CPHY &&
1629 vep->bus_type != V4L2_MBUS_CCP2)
1630 return 0;
1631
1632 rk_asd->mbus.type = vep->bus_type;
1633
1634 if (vep->bus_type == V4L2_MBUS_CSI2_DPHY ||
1635 vep->bus_type == V4L2_MBUS_CSI2_CPHY) {
1636 rk_asd->mbus.flags = vep->bus.mipi_csi2.flags;
1637 rk_asd->lanes = vep->bus.mipi_csi2.num_data_lanes;
1638 } else if (vep->bus_type == V4L2_MBUS_CCP2) {
1639 rk_asd->lanes = vep->bus.mipi_csi1.data_lane;
1640 } else {
1641 rk_asd->mbus.flags = bus->flags;
1642 }
1643
1644 return 0;
1645 }
1646
/* Async notifier callbacks: record bound sensors, then finish device setup. */
static const struct v4l2_async_notifier_operations subdev_notifier_ops = {
	.bound = subdev_notifier_bound,
	.complete = subdev_notifier_complete,
};
1651
cif_subdev_notifier(struct rkcif_device * cif_dev)1652 static int cif_subdev_notifier(struct rkcif_device *cif_dev)
1653 {
1654 struct v4l2_async_notifier *ntf = &cif_dev->notifier;
1655 struct device *dev = cif_dev->dev;
1656 int ret;
1657
1658 v4l2_async_notifier_init(ntf);
1659
1660 ret = v4l2_async_notifier_parse_fwnode_endpoints(
1661 dev, ntf, sizeof(struct rkcif_async_subdev), rkcif_fwnode_parse);
1662
1663 if (ret < 0) {
1664 v4l2_err(&cif_dev->v4l2_dev,
1665 "%s: parse fwnode failed\n", __func__);
1666 return ret;
1667 }
1668
1669 ntf->ops = &subdev_notifier_ops;
1670
1671 ret = v4l2_async_notifier_register(&cif_dev->v4l2_dev, ntf);
1672
1673 return ret;
1674 }
1675
notifier_isp_thread(void * data)1676 static int notifier_isp_thread(void *data)
1677 {
1678 struct rkcif_device *dev = data;
1679 int ret = 0;
1680
1681 ret = wait_for_completion_timeout(&dev->cmpl_ntf, msecs_to_jiffies(5000));
1682 if (ret) {
1683 mutex_lock(&rkcif_dev_mutex);
1684 subdev_asyn_register_itf(dev);
1685 mutex_unlock(&rkcif_dev_mutex);
1686 }
1687 return 0;
1688 }
1689
1690 /***************************** platform deive *******************************/
1691
rkcif_register_platform_subdevs(struct rkcif_device * cif_dev)1692 static int rkcif_register_platform_subdevs(struct rkcif_device *cif_dev)
1693 {
1694 int stream_num = 0, ret;
1695
1696 if (cif_dev->chip_id < CHIP_RV1126_CIF) {
1697 if (cif_dev->inf_id == RKCIF_MIPI_LVDS) {
1698 stream_num = RKCIF_MAX_STREAM_MIPI;
1699 ret = rkcif_register_stream_vdevs(cif_dev, stream_num,
1700 true);
1701 } else {
1702 stream_num = RKCIF_SINGLE_STREAM;
1703 ret = rkcif_register_stream_vdevs(cif_dev, stream_num,
1704 false);
1705 }
1706 } else {
1707 stream_num = RKCIF_MAX_STREAM_MIPI;
1708 ret = rkcif_register_stream_vdevs(cif_dev, stream_num, true);
1709 }
1710
1711 if (ret < 0) {
1712 dev_err(cif_dev->dev, "cif register stream[%d] failed!\n", stream_num);
1713 return -EINVAL;
1714 }
1715
1716 if (cif_dev->chip_id == CHIP_RK3588_CIF ||
1717 cif_dev->chip_id == CHIP_RV1106_CIF ||
1718 cif_dev->chip_id == CHIP_RK3562_CIF) {
1719 ret = rkcif_register_scale_vdevs(cif_dev, RKCIF_MAX_SCALE_CH, true);
1720
1721 if (ret < 0) {
1722 dev_err(cif_dev->dev, "cif register scale_vdev[%d] failed!\n", stream_num);
1723 goto err_unreg_stream_vdev;
1724 }
1725 }
1726 if (cif_dev->chip_id > CHIP_RK1808_CIF) {
1727 ret = rkcif_register_tools_vdevs(cif_dev, RKCIF_MAX_TOOLS_CH, true);
1728
1729 if (ret < 0) {
1730 dev_err(cif_dev->dev, "cif register tools_vdev[%d] failed!\n", RKCIF_MAX_TOOLS_CH);
1731 goto err_unreg_stream_vdev;
1732 }
1733 cif_dev->is_support_tools = true;
1734 } else {
1735 cif_dev->is_support_tools = false;
1736 }
1737 init_completion(&cif_dev->cmpl_ntf);
1738 kthread_run(notifier_isp_thread, cif_dev, "notifier isp");
1739 ret = cif_subdev_notifier(cif_dev);
1740 if (ret < 0) {
1741 v4l2_err(&cif_dev->v4l2_dev,
1742 "Failed to register subdev notifier(%d)\n", ret);
1743 goto err_unreg_stream_vdev;
1744 }
1745
1746 return 0;
1747 err_unreg_stream_vdev:
1748 rkcif_unregister_stream_vdevs(cif_dev, stream_num);
1749 if (cif_dev->chip_id == CHIP_RK3588_CIF ||
1750 cif_dev->chip_id == CHIP_RV1106_CIF ||
1751 cif_dev->chip_id == CHIP_RK3562_CIF)
1752 rkcif_unregister_scale_vdevs(cif_dev, RKCIF_MAX_SCALE_CH);
1753
1754 if (cif_dev->chip_id > CHIP_RK1808_CIF)
1755 rkcif_unregister_tools_vdevs(cif_dev, RKCIF_MAX_TOOLS_CH);
1756
1757 return ret;
1758 }
1759
rkcif_irq_handler(int irq,struct rkcif_device * cif_dev)1760 static irqreturn_t rkcif_irq_handler(int irq, struct rkcif_device *cif_dev)
1761 {
1762 if (cif_dev->workmode == RKCIF_WORKMODE_PINGPONG) {
1763 if (cif_dev->chip_id < CHIP_RK3588_CIF)
1764 rkcif_irq_pingpong(cif_dev);
1765 else
1766 rkcif_irq_pingpong_v1(cif_dev);
1767 } else {
1768 rkcif_irq_oneframe(cif_dev);
1769 }
1770 return IRQ_HANDLED;
1771 }
1772
rkcif_irq_lite_handler(int irq,struct rkcif_device * cif_dev)1773 static irqreturn_t rkcif_irq_lite_handler(int irq, struct rkcif_device *cif_dev)
1774 {
1775 rkcif_irq_lite_lvds(cif_dev);
1776
1777 return IRQ_HANDLED;
1778 }
1779
rkcif_attach_dphy_hw(struct rkcif_device * cif_dev)1780 static void rkcif_attach_dphy_hw(struct rkcif_device *cif_dev)
1781 {
1782 struct platform_device *plat_dev;
1783 struct device *dev = cif_dev->dev;
1784 struct device_node *np;
1785 struct csi2_dphy_hw *dphy_hw;
1786
1787 np = of_parse_phandle(dev->of_node, "rockchip,dphy_hw", 0);
1788 if (!np || !of_device_is_available(np)) {
1789 dev_err(dev,
1790 "failed to get dphy hw node\n");
1791 return;
1792 }
1793
1794 plat_dev = of_find_device_by_node(np);
1795 of_node_put(np);
1796 if (!plat_dev) {
1797 dev_err(dev,
1798 "failed to get dphy hw from node\n");
1799 return;
1800 }
1801
1802 dphy_hw = platform_get_drvdata(plat_dev);
1803 if (!dphy_hw) {
1804 dev_err(dev,
1805 "failed attach dphy hw\n");
1806 return;
1807 }
1808 cif_dev->dphy_hw = dphy_hw;
1809 }
1810
rkcif_attach_hw(struct rkcif_device * cif_dev)1811 int rkcif_attach_hw(struct rkcif_device *cif_dev)
1812 {
1813 struct device_node *np;
1814 struct platform_device *pdev;
1815 struct rkcif_hw *hw;
1816
1817 if (cif_dev->hw_dev)
1818 return 0;
1819
1820 cif_dev->chip_id = CHIP_RV1126_CIF_LITE;
1821 np = of_parse_phandle(cif_dev->dev->of_node, "rockchip,hw", 0);
1822 if (!np || !of_device_is_available(np)) {
1823 dev_err(cif_dev->dev, "failed to get cif hw node\n");
1824 return -ENODEV;
1825 }
1826
1827 pdev = of_find_device_by_node(np);
1828 of_node_put(np);
1829 if (!pdev) {
1830 dev_err(cif_dev->dev, "failed to get cif hw from node\n");
1831 return -ENODEV;
1832 }
1833
1834 hw = platform_get_drvdata(pdev);
1835 if (!hw) {
1836 dev_err(cif_dev->dev, "failed attach cif hw\n");
1837 return -EINVAL;
1838 }
1839
1840 hw->cif_dev[hw->dev_num] = cif_dev;
1841 hw->dev_num++;
1842 cif_dev->hw_dev = hw;
1843 cif_dev->chip_id = hw->chip_id;
1844 dev_info(cif_dev->dev, "attach to cif hw node\n");
1845 if (IS_ENABLED(CONFIG_CPU_RV1106))
1846 rkcif_attach_dphy_hw(cif_dev);
1847
1848 return 0;
1849 }
1850
rkcif_detach_hw(struct rkcif_device * cif_dev)1851 static int rkcif_detach_hw(struct rkcif_device *cif_dev)
1852 {
1853 struct rkcif_hw *hw = cif_dev->hw_dev;
1854 int i;
1855
1856 for (i = 0; i < hw->dev_num; i++) {
1857 if (hw->cif_dev[i] == cif_dev) {
1858 if ((i + 1) < hw->dev_num) {
1859 hw->cif_dev[i] = hw->cif_dev[i + 1];
1860 hw->cif_dev[i + 1] = NULL;
1861 } else {
1862 hw->cif_dev[i] = NULL;
1863 }
1864
1865 hw->dev_num--;
1866 dev_info(cif_dev->dev, "detach to cif hw node\n");
1867 break;
1868 }
1869 }
1870
1871 return 0;
1872 }
1873
/*
 * Initialize the stream-reset watchdog: thresholds come from Kconfig when
 * the monitor is enabled, otherwise everything is parked at "never fires"
 * (idle mode, 0xffffffff thresholds). Also sets up the timer itself and
 * the reset work item.
 */
static void rkcif_init_reset_monitor(struct rkcif_device *dev)
{
	struct rkcif_timer *timer = &dev->reset_watchdog_timer;

#if defined(CONFIG_ROCKCHIP_CIF_USE_MONITOR)
	/* thresholds configured at build time via Kconfig */
	timer->monitor_mode = CONFIG_ROCKCHIP_CIF_MONITOR_MODE;
	timer->err_time_interval = CONFIG_ROCKCHIP_CIF_MONITOR_KEEP_TIME;
	timer->frm_num_of_monitor_cycle = CONFIG_ROCKCHIP_CIF_MONITOR_CYCLE;
	timer->triggered_frame_num = CONFIG_ROCKCHIP_CIF_MONITOR_START_FRAME;
	timer->csi2_err_ref_cnt = CONFIG_ROCKCHIP_CIF_MONITOR_ERR_CNT;
#if defined(CONFIG_ROCKCHIP_CIF_RESET_BY_USER)
	timer->is_ctrl_by_user = true;
#else
	timer->is_ctrl_by_user = false;
#endif
#else
	/* monitor disabled: idle mode with thresholds that never trigger */
	timer->monitor_mode = RKCIF_MONITOR_MODE_IDLE;
	timer->err_time_interval = 0xffffffff;
	timer->frm_num_of_monitor_cycle = 0xffffffff;
	timer->triggered_frame_num = 0xffffffff;
	timer->csi2_err_ref_cnt = 0xffffffff;
#endif
	timer->is_running = false;
	timer->is_triggered = false;
	timer->is_buf_stop_update = false;
	timer->csi2_err_cnt_even = 0;
	timer->csi2_err_cnt_odd = 0;
	timer->csi2_err_fs_fe_cnt = 0;
	timer->csi2_err_fs_fe_detect_cnt = 0;
	timer->csi2_err_triggered_cnt = 0;
	timer->csi2_first_err_timestamp = 0;

	timer_setup(&timer->timer, rkcif_reset_watchdog_timer_handler, 0);

	INIT_WORK(&dev->reset_work.work, rkcif_reset_work);
}
1910
/*
 * rkcif_plat_init - one-time setup of a cif platform device.
 *
 * Initializes locks/counters/pipeline ops, the per-chip stream, scale and
 * tools video device state, selects the irq handler and work mode,
 * resolves the csi host index, then registers the v4l2 device, media
 * device and all platform subdevs, finally adding the device to the
 * global list.
 *
 * @cif_dev: device to initialize
 * @node: OF node used for alias lookup
 * @inf_id: interface type (RKCIF_DVP or RKCIF_MIPI_LVDS)
 *
 * Returns 0 on success or a negative errno; on failure the v4l2/media
 * registrations are rolled back.
 */
int rkcif_plat_init(struct rkcif_device *cif_dev, struct device_node *node, int inf_id)
{
	struct device *dev = cif_dev->dev;
	struct v4l2_device *v4l2_dev;
	int ret;

	cif_dev->hdr.hdr_mode = NO_HDR;
	cif_dev->inf_id = inf_id;

	mutex_init(&cif_dev->stream_lock);
	mutex_init(&cif_dev->scale_lock);
	mutex_init(&cif_dev->tools_lock);
	spin_lock_init(&cif_dev->hdr_lock);
	spin_lock_init(&cif_dev->buffree_lock);
	spin_lock_init(&cif_dev->reset_watchdog_timer.timer_lock);
	spin_lock_init(&cif_dev->reset_watchdog_timer.csi2_err_lock);
	atomic_set(&cif_dev->pipe.power_cnt, 0);
	atomic_set(&cif_dev->pipe.stream_cnt, 0);
	atomic_set(&cif_dev->power_cnt, 0);
	cif_dev->is_start_hdr = false;
	cif_dev->pipe.open = rkcif_pipeline_open;
	cif_dev->pipe.close = rkcif_pipeline_close;
	cif_dev->pipe.set_stream = rkcif_pipeline_set_stream;
	cif_dev->isr_hdl = rkcif_irq_handler;
	cif_dev->id_use_cnt = 0;
	memset(&cif_dev->sync_cfg, 0, sizeof(cif_dev->sync_cfg));
	cif_dev->sditf_cnt = 0;
	cif_dev->is_notifier_isp = false;
	cif_dev->sensor_linetime = 0;
	cif_dev->early_line = 0;
	cif_dev->is_thunderboot = false;
	cif_dev->rdbk_debug = 0;
	memset(&cif_dev->channels[0].capture_info, 0, sizeof(cif_dev->channels[0].capture_info));
	/* the lite instance has its own (lvds-only) irq handler */
	if (cif_dev->chip_id == CHIP_RV1126_CIF_LITE)
		cif_dev->isr_hdl = rkcif_irq_lite_handler;

	INIT_WORK(&cif_dev->err_state_work.work, rkcif_err_print_work);

	if (cif_dev->chip_id < CHIP_RV1126_CIF) {
		if (cif_dev->inf_id == RKCIF_MIPI_LVDS) {
			rkcif_stream_init(cif_dev, RKCIF_STREAM_MIPI_ID0);
			rkcif_stream_init(cif_dev, RKCIF_STREAM_MIPI_ID1);
			rkcif_stream_init(cif_dev, RKCIF_STREAM_MIPI_ID2);
			rkcif_stream_init(cif_dev, RKCIF_STREAM_MIPI_ID3);
		} else {
			rkcif_stream_init(cif_dev, RKCIF_STREAM_CIF);
		}
	} else {
		/* for rv1126/rk356x, bt656/bt1120/mipi are multi channels */
		rkcif_stream_init(cif_dev, RKCIF_STREAM_MIPI_ID0);
		rkcif_stream_init(cif_dev, RKCIF_STREAM_MIPI_ID1);
		rkcif_stream_init(cif_dev, RKCIF_STREAM_MIPI_ID2);
		rkcif_stream_init(cif_dev, RKCIF_STREAM_MIPI_ID3);
	}

	if (cif_dev->chip_id == CHIP_RK3588_CIF ||
	    cif_dev->chip_id == CHIP_RV1106_CIF ||
	    cif_dev->chip_id == CHIP_RK3562_CIF) {
		rkcif_init_scale_vdev(cif_dev, RKCIF_SCALE_CH0);
		rkcif_init_scale_vdev(cif_dev, RKCIF_SCALE_CH1);
		rkcif_init_scale_vdev(cif_dev, RKCIF_SCALE_CH2);
		rkcif_init_scale_vdev(cif_dev, RKCIF_SCALE_CH3);
	}

	if (cif_dev->chip_id > CHIP_RK1808_CIF) {
		rkcif_init_tools_vdev(cif_dev, RKCIF_TOOLS_CH0);
		rkcif_init_tools_vdev(cif_dev, RKCIF_TOOLS_CH1);
		rkcif_init_tools_vdev(cif_dev, RKCIF_TOOLS_CH2);
	}
#if defined(CONFIG_ROCKCHIP_CIF_WORKMODE_PINGPONG)
	cif_dev->workmode = RKCIF_WORKMODE_PINGPONG;
#elif defined(CONFIG_ROCKCHIP_CIF_WORKMODE_ONEFRAME)
	cif_dev->workmode = RKCIF_WORKMODE_ONEFRAME;
#else
	cif_dev->workmode = RKCIF_WORKMODE_PINGPONG;
#endif

#if defined(CONFIG_ROCKCHIP_CIF_USE_DUMMY_BUF)
	cif_dev->is_use_dummybuf = true;
#else
	cif_dev->is_use_dummybuf = false;
#endif
	if (cif_dev->chip_id == CHIP_RV1106_CIF)
		cif_dev->is_use_dummybuf = false;

	strlcpy(cif_dev->media_dev.model, dev_name(dev),
		sizeof(cif_dev->media_dev.model));
	/* alias id selects the mipi host; out-of-range falls back to 0 */
	cif_dev->csi_host_idx = of_alias_get_id(node, "rkcif_mipi_lvds");
	if (cif_dev->csi_host_idx < 0 || cif_dev->csi_host_idx > 5)
		cif_dev->csi_host_idx = 0;
	/* rk3588s2 routes the hosts differently; remap the index */
	if (cif_dev->hw_dev->is_rk3588s2) {
		if (cif_dev->csi_host_idx == 0)
			cif_dev->csi_host_idx = 2;
		else if (cif_dev->csi_host_idx == 2)
			cif_dev->csi_host_idx = 4;
		else if (cif_dev->csi_host_idx == 3)
			cif_dev->csi_host_idx = 5;
		v4l2_info(&cif_dev->v4l2_dev, "rk3588s2 attach to mipi%d\n",
			  cif_dev->csi_host_idx);
	}
	cif_dev->csi_host_idx_def = cif_dev->csi_host_idx;
	cif_dev->media_dev.dev = dev;
	v4l2_dev = &cif_dev->v4l2_dev;
	v4l2_dev->mdev = &cif_dev->media_dev;
	strlcpy(v4l2_dev->name, dev_name(dev), sizeof(v4l2_dev->name));

	ret = v4l2_device_register(cif_dev->dev, &cif_dev->v4l2_dev);
	if (ret < 0)
		return ret;

	media_device_init(&cif_dev->media_dev);
	ret = media_device_register(&cif_dev->media_dev);
	if (ret < 0) {
		v4l2_err(v4l2_dev, "Failed to register media device: %d\n",
			 ret);
		goto err_unreg_v4l2_dev;
	}

	/* create & register platefom subdev (from of_node) */
	ret = rkcif_register_platform_subdevs(cif_dev);
	if (ret < 0)
		goto err_unreg_media_dev;

	if (cif_dev->chip_id == CHIP_RV1126_CIF ||
	    cif_dev->chip_id == CHIP_RV1126_CIF_LITE ||
	    cif_dev->chip_id == CHIP_RK3568_CIF)
		rkcif_register_luma_vdev(&cif_dev->luma_vdev, v4l2_dev, cif_dev);

	mutex_lock(&rkcif_dev_mutex);
	list_add_tail(&cif_dev->list, &rkcif_device_list);
	mutex_unlock(&rkcif_dev_mutex);

	return 0;

err_unreg_media_dev:
	media_device_unregister(&cif_dev->media_dev);
err_unreg_v4l2_dev:
	v4l2_device_unregister(&cif_dev->v4l2_dev);
	return ret;
}
2051
rkcif_plat_uninit(struct rkcif_device * cif_dev)2052 int rkcif_plat_uninit(struct rkcif_device *cif_dev)
2053 {
2054 int stream_num = 0;
2055
2056 if (cif_dev->active_sensor->mbus.type == V4L2_MBUS_CCP2)
2057 rkcif_unregister_lvds_subdev(cif_dev);
2058
2059 if (cif_dev->active_sensor->mbus.type == V4L2_MBUS_BT656 ||
2060 cif_dev->active_sensor->mbus.type == V4L2_MBUS_PARALLEL)
2061 rkcif_unregister_dvp_sof_subdev(cif_dev);
2062
2063 media_device_unregister(&cif_dev->media_dev);
2064 v4l2_device_unregister(&cif_dev->v4l2_dev);
2065
2066 if (cif_dev->chip_id < CHIP_RV1126_CIF) {
2067 if (cif_dev->inf_id == RKCIF_MIPI_LVDS)
2068 stream_num = RKCIF_MAX_STREAM_MIPI;
2069 else
2070 stream_num = RKCIF_SINGLE_STREAM;
2071 } else {
2072 stream_num = RKCIF_MAX_STREAM_MIPI;
2073 }
2074 rkcif_unregister_stream_vdevs(cif_dev, stream_num);
2075
2076 if (cif_dev->chip_id == CHIP_RV1106_CIF)
2077 rkcif_rockit_dev_deinit();
2078 return 0;
2079 }
2080
/* Per-compatible configuration: selects which CIF front-end interface to use. */
static const struct rkcif_match_data rkcif_dvp_match_data = {
	.inf_id = RKCIF_DVP,
};

static const struct rkcif_match_data rkcif_mipi_lvds_match_data = {
	.inf_id = RKCIF_MIPI_LVDS,
};

/* Device-tree match table: one entry per supported bus front-end. */
static const struct of_device_id rkcif_plat_of_match[] = {
	{
		.compatible = "rockchip,rkcif-dvp",
		.data = &rkcif_dvp_match_data,
	},
	{
		.compatible = "rockchip,rkcif-mipi-lvds",
		.data = &rkcif_mipi_lvds_match_data,
	},
	{},
};
2100
rkcif_parse_dts(struct rkcif_device * cif_dev)2101 static void rkcif_parse_dts(struct rkcif_device *cif_dev)
2102 {
2103 int ret = 0;
2104 struct device_node *node = cif_dev->dev->of_node;
2105
2106 ret = of_property_read_u32(node,
2107 OF_CIF_WAIT_LINE,
2108 &cif_dev->wait_line);
2109 if (ret != 0)
2110 cif_dev->wait_line = 0;
2111 dev_info(cif_dev->dev, "rkcif wait line %d\n", cif_dev->wait_line);
2112 }
2113
rkcif_get_reserved_mem(struct rkcif_device * cif_dev)2114 static int rkcif_get_reserved_mem(struct rkcif_device *cif_dev)
2115 {
2116 struct device *dev = cif_dev->dev;
2117 struct device_node *np;
2118 struct resource r;
2119 int ret;
2120
2121 /* Get reserved memory region from Device-tree */
2122 np = of_parse_phandle(dev->of_node, "memory-region-thunderboot", 0);
2123 if (!np) {
2124 dev_info(dev, "No memory-region-thunderboot specified\n");
2125 return 0;
2126 }
2127
2128 ret = of_address_to_resource(np, 0, &r);
2129 if (ret) {
2130 dev_err(dev, "No memory address assigned to the region\n");
2131 return ret;
2132 }
2133
2134 cif_dev->resmem_pa = r.start;
2135 cif_dev->resmem_size = resource_size(&r);
2136 cif_dev->is_thunderboot = true;
2137 dev_info(dev, "Allocated reserved memory, paddr: 0x%x, size 0x%x\n",
2138 (u32)cif_dev->resmem_pa,
2139 (u32)cif_dev->resmem_size);
2140 return ret;
2141 }
2142
rkcif_plat_probe(struct platform_device * pdev)2143 static int rkcif_plat_probe(struct platform_device *pdev)
2144 {
2145 const struct of_device_id *match;
2146 struct device_node *node = pdev->dev.of_node;
2147 struct device *dev = &pdev->dev;
2148 struct rkcif_device *cif_dev;
2149 const struct rkcif_match_data *data;
2150 int ret;
2151
2152 sprintf(rkcif_version, "v%02x.%02x.%02x",
2153 RKCIF_DRIVER_VERSION >> 16,
2154 (RKCIF_DRIVER_VERSION & 0xff00) >> 8,
2155 RKCIF_DRIVER_VERSION & 0x00ff);
2156
2157 dev_info(dev, "rkcif driver version: %s\n", rkcif_version);
2158
2159 match = of_match_node(rkcif_plat_of_match, node);
2160 if (IS_ERR(match))
2161 return PTR_ERR(match);
2162 data = match->data;
2163
2164 cif_dev = devm_kzalloc(dev, sizeof(*cif_dev), GFP_KERNEL);
2165 if (!cif_dev)
2166 return -ENOMEM;
2167
2168 dev_set_drvdata(dev, cif_dev);
2169 cif_dev->dev = dev;
2170
2171 if (sysfs_create_group(&pdev->dev.kobj, &dev_attr_grp))
2172 return -ENODEV;
2173
2174 ret = rkcif_attach_hw(cif_dev);
2175 if (ret)
2176 return ret;
2177
2178 rkcif_parse_dts(cif_dev);
2179
2180 ret = rkcif_plat_init(cif_dev, node, data->inf_id);
2181 if (ret) {
2182 rkcif_detach_hw(cif_dev);
2183 return ret;
2184 }
2185
2186 ret = rkcif_get_reserved_mem(cif_dev);
2187 if (ret)
2188 return ret;
2189
2190 if (rkcif_proc_init(cif_dev))
2191 dev_warn(dev, "dev:%s create proc failed\n", dev_name(dev));
2192
2193 rkcif_init_reset_monitor(cif_dev);
2194 if (cif_dev->chip_id == CHIP_RV1106_CIF)
2195 rkcif_rockit_dev_init(cif_dev);
2196 pm_runtime_enable(&pdev->dev);
2197
2198 return 0;
2199 }
2200
/* Platform remove: tear down in roughly the reverse order of probe. */
static int rkcif_plat_remove(struct platform_device *pdev)
{
	struct rkcif_device *cif_dev = platform_get_drvdata(pdev);

	rkcif_plat_uninit(cif_dev);
	rkcif_detach_hw(cif_dev);
	rkcif_proc_cleanup(cif_dev);
	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
	/* Stop the reset watchdog timer before the device memory goes away. */
	del_timer_sync(&cif_dev->reset_watchdog_timer.timer);

	return 0;
}
2213
rkcif_runtime_suspend(struct device * dev)2214 static int __maybe_unused rkcif_runtime_suspend(struct device *dev)
2215 {
2216 struct rkcif_device *cif_dev = dev_get_drvdata(dev);
2217 int ret = 0;
2218
2219 if (atomic_dec_return(&cif_dev->power_cnt))
2220 return 0;
2221
2222 mutex_lock(&cif_dev->hw_dev->dev_lock);
2223 ret = pm_runtime_put_sync(cif_dev->hw_dev->dev);
2224 mutex_unlock(&cif_dev->hw_dev->dev_lock);
2225 return (ret > 0) ? 0 : ret;
2226 }
2227
rkcif_runtime_resume(struct device * dev)2228 static int __maybe_unused rkcif_runtime_resume(struct device *dev)
2229 {
2230 struct rkcif_device *cif_dev = dev_get_drvdata(dev);
2231 int ret = 0;
2232
2233 if (atomic_inc_return(&cif_dev->power_cnt) > 1)
2234 return 0;
2235
2236 mutex_lock(&cif_dev->hw_dev->dev_lock);
2237 ret = pm_runtime_resume_and_get(cif_dev->hw_dev->dev);
2238 mutex_unlock(&cif_dev->hw_dev->dev_lock);
2239 rkcif_do_soft_reset(cif_dev);
2240 return (ret > 0) ? 0 : ret;
2241 }
2242
__rkcif_clr_unready_dev(void)2243 static int __maybe_unused __rkcif_clr_unready_dev(void)
2244 {
2245 struct rkcif_device *cif_dev;
2246
2247 mutex_lock(&rkcif_dev_mutex);
2248
2249 list_for_each_entry(cif_dev, &rkcif_device_list, list) {
2250 v4l2_async_notifier_clr_unready_dev(&cif_dev->notifier);
2251 subdev_asyn_register_itf(cif_dev);
2252 }
2253
2254 mutex_unlock(&rkcif_dev_mutex);
2255
2256 return 0;
2257 }
2258
/*
 * Write hook for the "clr_unready_dev" module parameter: in the modular
 * build, writing any value triggers clearing of unready async subdevices.
 * The written value itself is ignored.
 */
static int rkcif_clr_unready_dev_param_set(const char *val, const struct kernel_param *kp)
{
#ifdef MODULE
	__rkcif_clr_unready_dev();
#endif

	return 0;
}

/* Write-only (0200) trigger parameter; reading it is not supported. */
module_param_call(clr_unready_dev, rkcif_clr_unready_dev_param_set, NULL, NULL, 0200);
MODULE_PARM_DESC(clr_unready_dev, "clear unready devices");
2270
#ifndef MODULE
/* Built-in case: run the clearing automatically as a late initcall. */
int rkcif_clr_unready_dev(void)
{
	__rkcif_clr_unready_dev();

	return 0;
}
#ifndef CONFIG_VIDEO_REVERSE_IMAGE
late_initcall(rkcif_clr_unready_dev);
#endif
#endif
2282
/*
 * System sleep reuses the runtime-PM force helpers; runtime PM itself is
 * refcounted across instances in rkcif_runtime_suspend()/resume().
 */
static const struct dev_pm_ops rkcif_plat_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(rkcif_runtime_suspend, rkcif_runtime_resume, NULL)
};
2288
/*
 * Exported (non-static) driver object — presumably registered by another
 * module in this driver stack rather than via module_platform_driver();
 * confirm against the companion hw driver.
 */
struct platform_driver rkcif_plat_drv = {
	.driver = {
		.name = CIF_DRIVER_NAME,
		.of_match_table = of_match_ptr(rkcif_plat_of_match),
		.pm = &rkcif_plat_pm_ops,
	},
	.probe = rkcif_plat_probe,
	.remove = rkcif_plat_remove,
};
EXPORT_SYMBOL(rkcif_plat_drv);

MODULE_AUTHOR("Rockchip Camera/ISP team");
MODULE_DESCRIPTION("Rockchip CIF platform driver");
MODULE_LICENSE("GPL v2");
2303