1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd. */
3
4 #include <linux/delay.h>
5 #include <linux/of.h>
6 #include <linux/of_graph.h>
7 #include <linux/of_platform.h>
8 #include <linux/pm_runtime.h>
9 #include <linux/slab.h>
10 #include <media/v4l2-common.h>
11 #include <media/v4l2-event.h>
12 #include <media/v4l2-fh.h>
13 #include <media/v4l2-ioctl.h>
14 #include <media/v4l2-subdev.h>
15 #include <media/videobuf2-dma-contig.h>
16 #include <linux/dma-iommu.h>
17 #include <linux/rk-camera-module.h>
18 #include "dev.h"
19 #include "regs.h"
20
/* Map a generic isp->ispp buffer back to its enclosing bridge buffer. */
static inline
struct rkisp_bridge_buf *to_bridge_buf(struct rkisp_ispp_buf *dbufs)
{
	return container_of(dbufs, struct rkisp_bridge_buf, dbufs);
}
26
/*
 * Release the shared bridge buffers once the last user is gone.
 *
 * hw->buf_init_cnt is a per-hw-dev use count: the buffers are only torn
 * down when it drops to zero.  Under buf_lock every in-flight buffer
 * (cur/nxt, pending fbcgain, the repeat list) is first returned to
 * hw->list, then the list is unlinked; the backing dummy-buffer memory is
 * freed outside the spinlock.
 */
static void free_bridge_buf(struct rkisp_bridge_device *dev)
{
	struct rkisp_hw_dev *hw = dev->ispdev->hw_dev;
	struct rkisp_bridge_buf *buf;
	struct rkisp_ispp_buf *dbufs;
	unsigned long lock_flags = 0;
	int i, j;

	spin_lock_irqsave(&hw->buf_lock, lock_flags);
	/* buffers are shared between devices; only the last caller frees */
	if (--hw->buf_init_cnt > 0) {
		spin_unlock_irqrestore(&hw->buf_lock, lock_flags);
		return;
	}

	v4l2_dbg(1, rkisp_debug, &dev->ispdev->v4l2_dev,
		 "%s\n", __func__);

	/* return the buffer currently owned by hardware to the free list */
	if (hw->cur_buf) {
		list_add_tail(&hw->cur_buf->list, &hw->list);
		if (hw->cur_buf == hw->nxt_buf)
			hw->nxt_buf = NULL;
		hw->cur_buf = NULL;
	}

	if (hw->nxt_buf) {
		list_add_tail(&hw->nxt_buf->list, &hw->list);
		hw->nxt_buf = NULL;
	}

	if (dev->ispdev->cur_fbcgain) {
		list_add_tail(&dev->ispdev->cur_fbcgain->list, &hw->list);
		dev->ispdev->cur_fbcgain = NULL;
	}

	/* drain the repeat list back onto the free list */
	while (!list_empty(&hw->rpt_list)) {
		dbufs = list_first_entry(&hw->rpt_list,
					 struct rkisp_ispp_buf, list);
		list_del(&dbufs->list);
		list_add_tail(&dbufs->list, &hw->list);
	}

	/* unlink everything; the memory itself is released below */
	while (!list_empty(&hw->list)) {
		dbufs = list_first_entry(&hw->list,
					 struct rkisp_ispp_buf, list);
		list_del(&dbufs->list);
	}

	hw->is_buf_init = false;
	spin_unlock_irqrestore(&hw->buf_lock, lock_flags);
	/* free the backing dummy buffers outside the spinlock */
	for (i = 0; i < BRIDGE_BUF_MAX; i++) {
		buf = &hw->bufs[i];
		for (j = 0; j < GROUP_BUF_MAX; j++)
			rkisp_free_buffer(dev->ispdev, &buf->dummy[j]);
	}

	rkisp_free_common_dummy_buf(dev->ispdev);
}
84
/*
 * Allocate and wire up the shared isp->ispp bridge buffers.
 *
 * Buffers live on the hw dev so multiple isp instances can share them;
 * hw->buf_init_cnt guards against repeated initialization.  Each bridge
 * buffer holds up to GROUP_BUF_MAX planes (picture + gain); on ISP_V30
 * only the picture plane is allocated.  The first buffer (and, in quick
 * mode, a second ping-pong buffer) is programmed into the MI base
 * registers so the hardware has somewhere to write the first frames.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated so far is released again via free_bridge_buf().
 */
static int init_buf(struct rkisp_bridge_device *dev, u32 pic_size, u32 gain_size)
{
	struct v4l2_subdev *sd = v4l2_get_subdev_hostdata(&dev->sd);
	struct rkisp_hw_dev *hw = dev->ispdev->hw_dev;
	struct rkisp_bridge_buf *buf;
	struct rkisp_dummy_buffer *dummy;
	int i, j, val, ret = 0;
	unsigned long lock_flags = 0;
	/* last rkisp_write() arg: direct register write only on ISP_V20 */
	bool is_direct = (hw->isp_ver == ISP_V20) ? true : false;

	spin_lock_irqsave(&hw->buf_lock, lock_flags);
	/* buffers are shared between devices; only the first caller allocates */
	if (++hw->buf_init_cnt > 1) {
		spin_unlock_irqrestore(&hw->buf_lock, lock_flags);
		return 0;
	}
	spin_unlock_irqrestore(&hw->buf_lock, lock_flags);

	v4l2_dbg(1, rkisp_debug, &dev->ispdev->v4l2_dev,
		 "%s pic size:%d gain size:%d\n",
		 __func__, pic_size, gain_size);

	INIT_LIST_HEAD(&hw->list);
	for (i = 0; i < dev->buf_num; i++) {
		buf = &hw->bufs[i];
		for (j = 0; j < GROUP_BUF_MAX; j++) {
			/* ISP_V30 uses the picture plane only, no gain plane */
			if (j && hw->isp_ver == ISP_V30)
				continue;
			dummy = &buf->dummy[j];
			dummy->is_need_vaddr = true;
			dummy->is_need_dbuf = true;
			/* plane 0 is the picture, plane 1 the gain map */
			dummy->size = PAGE_ALIGN(!j ? pic_size : gain_size);
			ret = rkisp_alloc_buffer(dev->ispdev, dummy);
			if (ret)
				goto err;
			buf->dbufs.dbuf[j] = dummy->dbuf;
			buf->dbufs.didx[j] = i * GROUP_BUF_MAX + j;
			buf->dbufs.gain_size = PAGE_ALIGN(gain_size);
			buf->dbufs.mfbc_size = PAGE_ALIGN(pic_size);
		}
		list_add_tail(&buf->dbufs.list, &hw->list);
		/* hand the buffer over to the ispp side */
		ret = v4l2_subdev_call(sd, video, s_rx_buffer, &buf->dbufs, NULL);
		if (ret)
			goto err;
	}

	/* one common dummy buffer suffices; allocate for the first csi input */
	for (i = 0; i < hw->dev_num; i++) {
		struct rkisp_device *isp = hw->isp[i];

		if (!isp ||
		    (isp && !(isp->isp_inp & INP_CSI)))
			continue;
		ret = rkisp_alloc_common_dummy_buf(isp);
		if (ret < 0)
			goto err;
		else
			break;
	}

	/* program the first buffer as the current hardware write target */
	hw->cur_buf = list_first_entry(&hw->list, struct rkisp_ispp_buf, list);
	list_del(&hw->cur_buf->list);
	buf = to_bridge_buf(hw->cur_buf);
	val = buf->dummy[GROUP_BUF_PIC].dma_addr;
	rkisp_write(dev->ispdev, dev->cfg->reg.y0_base, val, is_direct);
	/* chroma plane sits cfg->offset bytes after luma */
	val += dev->cfg->offset;
	rkisp_write(dev->ispdev, dev->cfg->reg.uv0_base, val, is_direct);
	if (hw->isp_ver == ISP_V20) {
		val = buf->dummy[GROUP_BUF_GAIN].dma_addr;
		rkisp_write(dev->ispdev, dev->cfg->reg.g0_base, val, is_direct);
	}

	if (!list_empty(&hw->list)) {
		hw->nxt_buf = list_first_entry(&hw->list,
					       struct rkisp_ispp_buf, list);
		list_del(&hw->nxt_buf->list);
	}
	/* quick mode: hardware ping-pongs, preload the second buffer too */
	if (hw->nxt_buf && (dev->work_mode & ISP_ISPP_QUICK)) {
		buf = to_bridge_buf(hw->nxt_buf);
		val = buf->dummy[GROUP_BUF_PIC].dma_addr;
		rkisp_write(dev->ispdev, dev->cfg->reg.y1_base, val, true);
		val += dev->cfg->offset;
		rkisp_write(dev->ispdev, dev->cfg->reg.uv1_base, val, true);
		val = buf->dummy[GROUP_BUF_GAIN].dma_addr;
		rkisp_write(dev->ispdev, dev->cfg->reg.g1_base, val, true);
		rkisp_set_bits(dev->ispdev, MI_WR_CTRL2,
			       0, SW_GAIN_WR_PINGPONG, true);
	}

	rkisp_set_bits(dev->ispdev, CIF_VI_DPCL, 0,
		       CIF_VI_DPCL_CHAN_MODE_MP |
		       CIF_VI_DPCL_MP_MUX_MRSZ_MI, true);
	rkisp_set_bits(dev->ispdev, MI_WR_CTRL, 0,
		       CIF_MI_CTRL_INIT_BASE_EN |
		       CIF_MI_CTRL_INIT_OFFSET_EN, true);
	/* unmask the frame-end interrupt for this path */
	rkisp_set_bits(dev->ispdev, MI_IMSC, 0,
		       dev->cfg->frame_end_id, true);

	spin_lock_irqsave(&hw->buf_lock, lock_flags);
	hw->is_buf_init = true;
	spin_unlock_irqrestore(&hw->buf_lock, lock_flags);
	return 0;
err:
	free_bridge_buf(dev);
	v4l2_err(&dev->sd, "%s fail:%d\n", __func__, ret);
	return ret;
}
190
/*
 * Configure the isp->ispp work mode and allocate the bridge buffers.
 *
 * Picture and gain buffer sizes are derived from the maximum input size
 * (falling back to the configured crop) and the requested work mode, then
 * init_buf() allocates accordingly.  ISP_ISPP_INIT_FAIL is a special mode
 * that only tears the buffers down again.
 */
static int config_mode(struct rkisp_bridge_device *dev)
{
	struct rkisp_hw_dev *hw = dev->ispdev->hw_dev;
	u32 w = hw->max_in.w ? hw->max_in.w : dev->crop.width;
	u32 h = hw->max_in.h ? hw->max_in.h : dev->crop.height;
	u32 offs = w * h;
	u32 pic_size = 0, gain_size = 0;

	if (dev->work_mode == ISP_ISPP_INIT_FAIL) {
		free_bridge_buf(dev);
		return 0;
	}

	if (!dev->linked || !dev->ispdev->isp_inp) {
		v4l2_err(&dev->sd,
			 "invalid: link:%d or isp input:0x%x\n",
			 dev->linked,
			 dev->ispdev->isp_inp);
		return -EINVAL;
	}

	v4l2_dbg(1, rkisp_debug, &dev->sd,
		 "work mode:0x%x buf num:%d\n",
		 dev->work_mode, dev->buf_num);

	if (hw->isp_ver == ISP_V20) {
		/* gain map: 1/16 byte per pixel of the 64x128-aligned image */
		gain_size = ALIGN(w, 64) * ALIGN(h, 128) >> 4;
		rkisp_bridge_init_ops_v20(dev);
	} else {
		/* v30 supports neither fbc output nor quick mode */
		dev->work_mode &= ~(ISP_ISPP_FBC | ISP_ISPP_QUICK);
		rkisp_bridge_init_ops_v30(dev);
	}

	if (dev->work_mode & ISP_ISPP_FBC) {
		/* fbc mode: 16-aligned, w*h/16 extra bytes ahead of payload */
		w = ALIGN(w, 16);
		h = ALIGN(h, 16);
		offs = w * h >> 4;
		pic_size = offs;
	}
	/* payload: 2 bytes/pixel for 422, 1.5 bytes/pixel otherwise */
	if (dev->work_mode & ISP_ISPP_422)
		pic_size += w * h * 2;
	else
		pic_size += w * h * 3 >> 1;
	/* second-plane offset from the buffer start */
	dev->cfg->offset = offs;

	if (hw->isp_ver == ISP_V20) {
		/* extra room for the motion-detect timestamp block */
		pic_size += RKISP_MOTION_DECT_TS_SIZE;
		gain_size += RKISP_MOTION_DECT_TS_SIZE;
	}
	return init_buf(dev, pic_size, gain_size);
}
242
/*
 * Bring the bridge path up: sensor info, clocks/power, bridge hardware,
 * sub-devices, and finally the media pipeline.  Each stage is unwound in
 * reverse order through the goto ladder on failure.
 */
static int bridge_start_stream(struct v4l2_subdev *sd)
{
	struct rkisp_bridge_device *dev = v4l2_get_subdevdata(sd);
	int ret = -EINVAL;

	if (WARN_ON(dev->en))
		return -EBUSY;

	/* raw bayer cannot be passed from isp to ispp */
	if (dev->ispdev->isp_sdev.out_fmt.fmt_type == FMT_BAYER) {
		v4l2_err(sd, "no support raw from isp to ispp\n");
		goto free_buf;
	}

	if (dev->ispdev->isp_inp & INP_CSI ||
	    dev->ispdev->isp_inp & INP_DVP ||
	    dev->ispdev->isp_inp & INP_LVDS ||
	    dev->ispdev->isp_inp & INP_CIF) {
		/* Always update sensor info in case media topology changed */
		ret = rkisp_update_sensor_info(dev->ispdev);
		if (ret < 0) {
			v4l2_err(sd, "update sensor info failed %d\n", ret);
			goto free_buf;
		}
	}

	/* enable clocks/power-domains */
	ret = dev->ispdev->pipe.open(&dev->ispdev->pipe, &sd->entity, true);
	if (ret < 0)
		goto free_buf;

	ret = dev->ops->start(dev);
	if (ret)
		goto close_pipe;

	/* start sub-devices */
	ret = dev->ispdev->pipe.set_stream(&dev->ispdev->pipe, true);
	if (ret < 0)
		goto stop_bridge;

	ret = media_pipeline_start(&sd->entity, &dev->ispdev->pipe.pipe);
	if (ret < 0)
		goto pipe_stream_off;

	return 0;
pipe_stream_off:
	dev->ispdev->pipe.set_stream(&dev->ispdev->pipe, false);
stop_bridge:
	dev->ops->stop(dev);
close_pipe:
	dev->ispdev->pipe.close(&dev->ispdev->pipe);
	hdr_destroy_buf(dev->ispdev);
free_buf:
	free_bridge_buf(dev);
	v4l2_err(&dev->sd, "%s fail:%d\n", __func__, ret);
	return ret;
}
299
/* Release bridge buffers and hdr buffers together on stream teardown. */
static void bridge_destroy_buf(struct rkisp_bridge_device *dev)
{
	free_bridge_buf(dev);
	hdr_destroy_buf(dev->ispdev);
}
305
/* Tear the bridge path down, reversing bridge_start_stream(). */
static int bridge_stop_stream(struct v4l2_subdev *sd)
{
	struct rkisp_bridge_device *dev = v4l2_get_subdevdata(sd);

	dev->ops->stop(dev);
	media_pipeline_stop(&sd->entity);
	dev->ispdev->pipe.set_stream(&dev->ispdev->pipe, false);
	dev->ispdev->pipe.close(&dev->ispdev->pipe);
	bridge_destroy_buf(dev);
	return 0;
}
317
bridge_get_set_fmt(struct v4l2_subdev * sd,struct v4l2_subdev_pad_config * cfg,struct v4l2_subdev_format * fmt)318 static int bridge_get_set_fmt(struct v4l2_subdev *sd,
319 struct v4l2_subdev_pad_config *cfg,
320 struct v4l2_subdev_format *fmt)
321 {
322 struct rkisp_bridge_device *dev = v4l2_get_subdevdata(sd);
323
324 if (!fmt)
325 return -EINVAL;
326
327 /* get isp out format */
328 fmt->pad = RKISP_ISP_PAD_SOURCE_PATH;
329 fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
330 return v4l2_subdev_call(&dev->ispdev->isp_sdev.sd,
331 pad, get_fmt, NULL, fmt);
332 }
333
bridge_set_selection(struct v4l2_subdev * sd,struct v4l2_subdev_pad_config * cfg,struct v4l2_subdev_selection * sel)334 static int bridge_set_selection(struct v4l2_subdev *sd,
335 struct v4l2_subdev_pad_config *cfg,
336 struct v4l2_subdev_selection *sel)
337 {
338 struct rkisp_bridge_device *dev = v4l2_get_subdevdata(sd);
339 struct rkisp_isp_subdev *isp_sd = &dev->ispdev->isp_sdev;
340 u32 src_w = isp_sd->out_crop.width;
341 u32 src_h = isp_sd->out_crop.height;
342 struct v4l2_rect *crop;
343
344 if (!sel)
345 return -EINVAL;
346 if (sel->target != V4L2_SEL_TGT_CROP)
347 return -EINVAL;
348
349 crop = &sel->r;
350 crop->left = clamp_t(u32, crop->left, 0, src_w);
351 crop->top = clamp_t(u32, crop->top, 0, src_h);
352 crop->width = clamp_t(u32, crop->width,
353 CIF_ISP_OUTPUT_W_MIN, src_w - crop->left);
354 crop->height = clamp_t(u32, crop->height,
355 CIF_ISP_OUTPUT_H_MIN, src_h - crop->top);
356
357 dev->crop = *crop;
358 return 0;
359 }
360
bridge_get_selection(struct v4l2_subdev * sd,struct v4l2_subdev_pad_config * cfg,struct v4l2_subdev_selection * sel)361 static int bridge_get_selection(struct v4l2_subdev *sd,
362 struct v4l2_subdev_pad_config *cfg,
363 struct v4l2_subdev_selection *sel)
364 {
365 struct rkisp_bridge_device *dev = v4l2_get_subdevdata(sd);
366 struct rkisp_isp_subdev *isp_sd = &dev->ispdev->isp_sdev;
367 struct v4l2_rect *crop;
368
369 if (!sel)
370 return -EINVAL;
371
372 crop = &sel->r;
373 switch (sel->target) {
374 case V4L2_SEL_TGT_CROP_BOUNDS:
375 *crop = isp_sd->out_crop;
376 break;
377 case V4L2_SEL_TGT_CROP:
378 *crop = dev->crop;
379 break;
380 default:
381 return -EINVAL;
382 }
383
384 return 0;
385 }
386
bridge_s_rx_buffer(struct v4l2_subdev * sd,void * buf,unsigned int * size)387 static int bridge_s_rx_buffer(struct v4l2_subdev *sd,
388 void *buf, unsigned int *size)
389 {
390 struct rkisp_bridge_device *dev = v4l2_get_subdevdata(sd);
391 struct rkisp_hw_dev *hw = dev->ispdev->hw_dev;
392 struct rkisp_ispp_buf *dbufs = buf;
393 unsigned long lock_flags = 0;
394
395 spin_lock_irqsave(&hw->buf_lock, lock_flags);
396 /* size isn't using now */
397 if (!dbufs || !hw->buf_init_cnt) {
398 spin_unlock_irqrestore(&hw->buf_lock, lock_flags);
399 return -EINVAL;
400 }
401 list_add_tail(&dbufs->list, &hw->list);
402 spin_unlock_irqrestore(&hw->buf_lock, lock_flags);
403 return 0;
404 }
405
/*
 * .s_stream: start/stop the bridge under the hw dev lock, tracking active
 * users via the capture-device refcount.
 *
 * NOTE(review): on a failed start the refcount stays incremented until a
 * matching s_stream(0) arrives (whose decrement is unconditional) — confirm
 * callers always pair on/off even when the start fails, otherwise the
 * count leaks.
 */
static int bridge_s_stream(struct v4l2_subdev *sd, int on)
{
	struct rkisp_bridge_device *dev = v4l2_get_subdevdata(sd);
	struct rkisp_hw_dev *hw = dev->ispdev->hw_dev;
	int ret = 0;

	v4l2_dbg(1, rkisp_debug, sd,
		 "%s %d\n", __func__, on);

	mutex_lock(&hw->dev_lock);
	if (on) {
		/* reset per-stream debug statistics */
		memset(&dev->dbg, 0, sizeof(dev->dbg));
		atomic_inc(&dev->ispdev->cap_dev.refcnt);
		ret = bridge_start_stream(sd);
	} else {
		/* only stop what was actually started */
		if (dev->en)
			ret = bridge_stop_stream(sd);
		atomic_dec(&dev->ispdev->cap_dev.refcnt);
	}
	mutex_unlock(&hw->dev_lock);

	return ret;
}
429
bridge_s_power(struct v4l2_subdev * sd,int on)430 static int bridge_s_power(struct v4l2_subdev *sd, int on)
431 {
432 int ret = 0;
433
434 v4l2_dbg(1, rkisp_debug, sd,
435 "%s %d\n", __func__, on);
436
437 if (on)
438 ret = v4l2_pipeline_pm_get(&sd->entity);
439 else
440 v4l2_pipeline_pm_put(&sd->entity);
441
442 return ret;
443 }
444
bridge_ioctl(struct v4l2_subdev * sd,unsigned int cmd,void * arg)445 static long bridge_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
446 {
447 struct rkisp_bridge_device *dev = v4l2_get_subdevdata(sd);
448 struct rkisp_ispp_mode *mode;
449 struct max_input *max_in;
450 long ret = 0;
451
452 switch (cmd) {
453 case RKISP_ISPP_CMD_SET_FMT:
454 max_in = arg;
455 dev->ispdev->hw_dev->max_in = *max_in;
456 break;
457 case RKISP_ISPP_CMD_SET_MODE:
458 mode = arg;
459 dev->work_mode = mode->work_mode;
460 dev->buf_num = mode->buf_num;
461 ret = config_mode(dev);
462 rkisp_chk_tb_over(dev->ispdev);
463 break;
464 default:
465 ret = -ENOIOCTLCMD;
466 }
467
468 return ret;
469 }
470
static const struct v4l2_subdev_pad_ops bridge_pad_ops = {
	/* both ops report the isp output format; set_fmt does not modify it */
	.set_fmt = bridge_get_set_fmt,
	.get_fmt = bridge_get_set_fmt,
	.get_selection = bridge_get_selection,
	.set_selection = bridge_set_selection,
};

static const struct v4l2_subdev_video_ops bridge_video_ops = {
	.s_rx_buffer = bridge_s_rx_buffer,
	.s_stream = bridge_s_stream,
};

static const struct v4l2_subdev_core_ops bridge_core_ops = {
	.s_power = bridge_s_power,
	.ioctl = bridge_ioctl,
};

/* v4l2 subdev ops table for the isp->ispp bridge subdevice */
static struct v4l2_subdev_ops bridge_v4l2_ops = {
	.core = &bridge_core_ops,
	.video = &bridge_video_ops,
	.pad = &bridge_pad_ops,
};
493
/*
 * Prepare the next MI buffer at frame start.
 *
 * Does nothing in quick mode (hardware handles buffer switching itself),
 * when the bridge is disabled, or when CIF_ISP_FRAME is signalled.
 * Otherwise it picks the next free buffer under buf_lock and lets the
 * version-specific update_mi() program the addresses.
 */
void rkisp_bridge_update_mi(struct rkisp_device *dev, u32 isp_mis)
{
	struct rkisp_bridge_device *br = &dev->br_dev;
	struct rkisp_hw_dev *hw = dev->hw_dev;
	unsigned long lock_flags = 0;

	if ((dev->isp_ver != ISP_V20 && dev->isp_ver != ISP_V30) ||
	    !br->en || br->work_mode & ISP_ISPP_QUICK ||
	    isp_mis & CIF_ISP_FRAME)
		return;

	/* record the frame-start timestamp */
	br->fs_ns = ktime_get_ns();
	spin_lock_irqsave(&hw->buf_lock, lock_flags);
	if (!hw->nxt_buf && !list_empty(&hw->list)) {
		hw->nxt_buf = list_first_entry(&hw->list,
					       struct rkisp_ispp_buf, list);
		list_del(&hw->nxt_buf->list);
	}
	spin_unlock_irqrestore(&hw->buf_lock, lock_flags);

	br->ops->update_mi(br);
}
516
/*
 * Handle the bridge frame-end interrupt.
 *
 * If the configured frame-end bit is pending, consume it (clear in
 * hardware and mask it out of *mis_val so later handlers do not see it),
 * run the version-specific frame_end handler and report the finished
 * frame type to the idle tracker.
 */
void rkisp_bridge_isr(u32 *mis_val, struct rkisp_device *dev)
{
	struct rkisp_bridge_device *bridge = &dev->br_dev;
	void __iomem *base = dev->base_addr;
	u32 irq;

	if (!bridge->en)
		return;

	/* simplified: "!cfg || (cfg && !pending)" had a redundant term */
	if (!bridge->cfg ||
	    !(*mis_val & bridge->cfg->frame_end_id))
		return;

	irq = bridge->cfg->frame_end_id;
	*mis_val &= ~irq;
	writel(irq, base + CIF_MI_ICR);

	irq = (irq == MI_MPFBC_FRAME) ? ISP_FRAME_MPFBC : ISP_FRAME_MP;
	bridge->ops->frame_end(bridge, FRAME_IRQ);

	rkisp_check_idle(dev, irq);
}
540
check_remote_node(struct rkisp_device * ispdev)541 static int check_remote_node(struct rkisp_device *ispdev)
542 {
543 struct device *dev = ispdev->dev;
544 struct device_node *parent = dev->of_node;
545 struct device_node *remote = NULL;
546 int i, j;
547
548 for (i = 0; i < 3; i++) {
549 for (j = 0; j < 2; j++) {
550 remote = of_graph_get_remote_node(parent, i, j);
551 if (!remote)
552 continue;
553 of_node_put(remote);
554 if (strstr(of_node_full_name(remote), "ispp"))
555 return 0;
556 }
557 }
558
559 return -ENODEV;
560 }
561
/*
 * Create and register the isp->ispp bridge subdevice.
 *
 * Only registered on ISP_V20/V30 when the devicetree graph actually has an
 * ispp node connected; otherwise this is a silent no-op so the rest of the
 * driver can probe without a bridge.
 *
 * NOTE(review): the media_create_pad_link() result is propagated, but the
 * waitqueue/workqueue/hrtimer setup below it runs regardless, and an
 * alloc_workqueue() failure is never checked — confirm this is intended.
 */
int rkisp_register_bridge_subdev(struct rkisp_device *dev,
				 struct v4l2_device *v4l2_dev)
{
	struct rkisp_bridge_device *bridge = &dev->br_dev;
	struct v4l2_subdev *sd;
	struct media_entity *source, *sink;
	int ret;

	memset(bridge, 0, sizeof(*bridge));
	if ((dev->isp_ver != ISP_V20 && dev->isp_ver != ISP_V30) ||
	    check_remote_node(dev) < 0)
		return 0;

	bridge->ispdev = dev;
	sd = &bridge->sd;
	v4l2_subdev_init(sd, &bridge_v4l2_ops);
	//sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	sd->entity.obj_type = 0;
	snprintf(sd->name, sizeof(sd->name), "%s", BRIDGE_DEV_NAME);
	bridge->pad.flags = MEDIA_PAD_FL_SINK;
	ret = media_entity_pads_init(&sd->entity, 1, &bridge->pad);
	if (ret < 0)
		return ret;
	sd->owner = THIS_MODULE;
	v4l2_set_subdevdata(sd, bridge);
	sd->grp_id = GRP_ID_ISP_BRIDGE;
	ret = v4l2_device_register_subdev(v4l2_dev, sd);
	if (ret < 0) {
		v4l2_err(sd, "Failed to register subdev\n");
		goto free_media;
	}
	/* default crop follows the isp output */
	bridge->crop = dev->isp_sdev.out_crop;
	/* bridge links */
	bridge->linked = true;
	source = &dev->isp_sdev.sd.entity;
	sink = &sd->entity;
	ret = media_create_pad_link(source, RKISP_ISP_PAD_SOURCE_PATH,
				    sink, 0, bridge->linked);
	init_waitqueue_head(&bridge->done);
	bridge->wq = alloc_workqueue("rkisp bridge workqueue",
				     WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	hrtimer_init(&bridge->frame_qst, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	return ret;

free_media:
	media_entity_cleanup(&sd->entity);
	return ret;
}
610
rkisp_unregister_bridge_subdev(struct rkisp_device * dev)611 void rkisp_unregister_bridge_subdev(struct rkisp_device *dev)
612 {
613 struct v4l2_subdev *sd = &dev->br_dev.sd;
614
615 if ((dev->isp_ver != ISP_V20 && dev->isp_ver != ISP_V30) ||
616 check_remote_node(dev) < 0)
617 return;
618 v4l2_device_unregister_subdev(sd);
619 media_entity_cleanup(&sd->entity);
620 }
621
rkisp_get_bridge_sd(struct platform_device * dev,struct v4l2_subdev ** sd)622 void rkisp_get_bridge_sd(struct platform_device *dev,
623 struct v4l2_subdev **sd)
624 {
625 struct rkisp_device *isp_dev = platform_get_drvdata(dev);
626
627 if (isp_dev)
628 *sd = &isp_dev->br_dev.sd;
629 else
630 *sd = NULL;
631 }
632 EXPORT_SYMBOL(rkisp_get_bridge_sd);
633