1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd. */
3
4 #include <linux/delay.h>
5 #include <linux/of.h>
6 #include <linux/of_graph.h>
7 #include <linux/of_platform.h>
8 #include <linux/pm_runtime.h>
9 #include <linux/slab.h>
10 #include <media/v4l2-common.h>
11 #include <media/v4l2-event.h>
12 #include <media/v4l2-fh.h>
13 #include <media/v4l2-ioctl.h>
14 #include <media/v4l2-subdev.h>
15 #include <media/videobuf2-dma-contig.h>
16 #include <linux/dma-iommu.h>
17 #include <linux/rk-camera-module.h>
18 #include "dev.h"
19 #include "regs.h"
20
/* Map an embedded rkisp_ispp_buf back to its enclosing rkisp_bridge_buf. */
static inline
struct rkisp_bridge_buf *to_bridge_buf(struct rkisp_ispp_buf *dbufs)
{
	return container_of(dbufs, struct rkisp_bridge_buf, dbufs);
}
26
/*
 * Release the bridge buffers shared through the hw device.
 *
 * hw->buf_init_cnt counts users of the shared buffer pool (see init_buf()),
 * so only the last caller actually tears the state down. Under buf_lock the
 * in-flight pointers (cur_buf, nxt_buf, cur_fbcgain) and the repeat list are
 * drained back into hw->list, which is then emptied; the list entries are
 * embedded in hw->bufs, so unlinking frees nothing here. The DMA memory
 * itself is released only after the spinlock is dropped.
 */
static void free_bridge_buf(struct rkisp_bridge_device *dev)
{
	struct rkisp_hw_dev *hw = dev->ispdev->hw_dev;
	struct rkisp_bridge_buf *buf;
	struct rkisp_ispp_buf *dbufs;
	unsigned long lock_flags = 0;
	int i, j;

	spin_lock_irqsave(&hw->buf_lock, lock_flags);
	/* pool is shared: only the last user frees */
	if (--hw->buf_init_cnt > 0) {
		spin_unlock_irqrestore(&hw->buf_lock, lock_flags);
		return;
	}

	v4l2_dbg(1, rkisp_debug, &dev->ispdev->v4l2_dev,
		 "%s\n", __func__);

	/* return the currently programmed buffer to the free list */
	if (hw->cur_buf) {
		list_add_tail(&hw->cur_buf->list, &hw->list);
		if (hw->cur_buf == hw->nxt_buf)
			hw->nxt_buf = NULL;
		hw->cur_buf = NULL;
	}

	if (hw->nxt_buf) {
		list_add_tail(&hw->nxt_buf->list, &hw->list);
		hw->nxt_buf = NULL;
	}

	if (dev->ispdev->cur_fbcgain) {
		list_add_tail(&dev->ispdev->cur_fbcgain->list, &hw->list);
		dev->ispdev->cur_fbcgain = NULL;
	}

	/* move everything queued on the repeat list back as well */
	while (!list_empty(&hw->rpt_list)) {
		dbufs = list_first_entry(&hw->rpt_list,
					 struct rkisp_ispp_buf, list);
		list_del(&dbufs->list);
		list_add_tail(&dbufs->list, &hw->list);
	}

	/* empty the free list; entries live inside hw->bufs, nothing freed */
	while (!list_empty(&hw->list)) {
		dbufs = list_first_entry(&hw->list,
					 struct rkisp_ispp_buf, list);
		list_del(&dbufs->list);
	}

	hw->is_buf_init = false;
	spin_unlock_irqrestore(&hw->buf_lock, lock_flags);
	/*
	 * Free all BRIDGE_BUF_MAX slots, not just dev->buf_num —
	 * presumably rkisp_free_buffer() tolerates never-allocated
	 * entries (TODO confirm).
	 */
	for (i = 0; i < BRIDGE_BUF_MAX; i++) {
		buf = &hw->bufs[i];
		for (j = 0; j < GROUP_BUF_MAX; j++)
			rkisp_free_buffer(dev->ispdev, &buf->dummy[j]);
	}

	rkisp_free_common_dummy_buf(dev->ispdev);
}
84
/*
 * Allocate the shared picture/gain buffer pool and program the first
 * (and, in QUICK mode, second) buffer addresses into the MI registers.
 *
 * @pic_size:  bytes needed for one output picture buffer
 * @gain_size: bytes needed for one gain buffer (0 on non-V20)
 *
 * The pool is shared between ISP instances behind hw_dev; buf_init_cnt
 * counts users, so only the first caller allocates. Each buffer is also
 * announced to the remote (ispp) subdev via the s_rx_buffer video op.
 * Returns 0 on success or a negative errno (pool is torn down on error).
 */
static int init_buf(struct rkisp_bridge_device *dev, u32 pic_size, u32 gain_size)
{
	struct v4l2_subdev *sd = v4l2_get_subdev_hostdata(&dev->sd);
	struct rkisp_hw_dev *hw = dev->ispdev->hw_dev;
	struct rkisp_bridge_buf *buf;
	struct rkisp_dummy_buffer *dummy;
	/* NOTE(review): val holds a dma_addr in an int — assumes 32-bit
	 * DMA addresses; verify on platforms with >32-bit dma_addr_t. */
	int i, j, val, ret = 0;
	unsigned long lock_flags = 0;
	/* on V20 the register writes go directly to hardware */
	bool is_direct = (hw->isp_ver == ISP_V20) ? true : false;

	spin_lock_irqsave(&hw->buf_lock, lock_flags);
	/* another instance already initialized the shared pool */
	if (++hw->buf_init_cnt > 1) {
		spin_unlock_irqrestore(&hw->buf_lock, lock_flags);
		return 0;
	}
	spin_unlock_irqrestore(&hw->buf_lock, lock_flags);

	v4l2_dbg(1, rkisp_debug, &dev->ispdev->v4l2_dev,
		 "%s pic size:%d gain size:%d\n",
		 __func__, pic_size, gain_size);

	INIT_LIST_HEAD(&hw->list);
	for (i = 0; i < dev->buf_num; i++) {
		buf = &hw->bufs[i];
		for (j = 0; j < GROUP_BUF_MAX; j++) {
			/* V30 has no gain buffer: only index 0 (picture) */
			if (j && hw->isp_ver == ISP_V30)
				continue;
			dummy = &buf->dummy[j];
			dummy->is_need_vaddr = true;
			dummy->is_need_dbuf = true;
			/* index 0 is the picture, others the gain buffer */
			dummy->size = PAGE_ALIGN(!j ? pic_size : gain_size);
			ret = rkisp_alloc_buffer(dev->ispdev, dummy);
			if (ret)
				goto err;
			buf->dbufs.dbuf[j] = dummy->dbuf;
			buf->dbufs.didx[j] = i * GROUP_BUF_MAX + j;
			buf->dbufs.gain_size = PAGE_ALIGN(gain_size);
			buf->dbufs.mfbc_size = PAGE_ALIGN(pic_size);
		}
		list_add_tail(&buf->dbufs.list, &hw->list);
		/* hand the buffer to the remote ispp subdev */
		ret = v4l2_subdev_call(sd, video, s_rx_buffer, &buf->dbufs, NULL);
		if (ret)
			goto err;
	}

	/* one common dummy buffer is enough for all CSI-fed instances */
	for (i = 0; i < hw->dev_num; i++) {
		struct rkisp_device *isp = hw->isp[i];

		if (!(isp->isp_inp & INP_CSI))
			continue;
		ret = rkisp_alloc_common_dummy_buf(isp);
		if (ret < 0)
			goto err;
		else
			break;
	}

	/* program the first buffer's Y/UV (and V20 gain) base addresses */
	hw->cur_buf = list_first_entry(&hw->list, struct rkisp_ispp_buf, list);
	list_del(&hw->cur_buf->list);
	buf = to_bridge_buf(hw->cur_buf);
	val = buf->dummy[GROUP_BUF_PIC].dma_addr;
	rkisp_write(dev->ispdev, dev->cfg->reg.y0_base, val, is_direct);
	val += dev->cfg->offset;
	rkisp_write(dev->ispdev, dev->cfg->reg.uv0_base, val, is_direct);
	if (hw->isp_ver == ISP_V20) {
		val = buf->dummy[GROUP_BUF_GAIN].dma_addr;
		rkisp_write(dev->ispdev, dev->cfg->reg.g0_base, val, is_direct);
	}

	if (!list_empty(&hw->list)) {
		hw->nxt_buf = list_first_entry(&hw->list,
					       struct rkisp_ispp_buf, list);
		list_del(&hw->nxt_buf->list);
	}
	/* QUICK mode ping-pongs between two buffers: prime the second set */
	if (hw->nxt_buf && (dev->work_mode & ISP_ISPP_QUICK)) {
		buf = to_bridge_buf(hw->nxt_buf);
		val = buf->dummy[GROUP_BUF_PIC].dma_addr;
		rkisp_write(dev->ispdev, dev->cfg->reg.y1_base, val, true);
		val += dev->cfg->offset;
		rkisp_write(dev->ispdev, dev->cfg->reg.uv1_base, val, true);
		val = buf->dummy[GROUP_BUF_GAIN].dma_addr;
		rkisp_write(dev->ispdev, dev->cfg->reg.g1_base, val, true);
		rkisp_set_bits(dev->ispdev, MI_WR_CTRL2,
			       0, SW_GAIN_WR_PINGPONG, true);
	}

	rkisp_set_bits(dev->ispdev, CIF_VI_DPCL, 0,
		       CIF_VI_DPCL_CHAN_MODE_MP |
		       CIF_VI_DPCL_MP_MUX_MRSZ_MI, true);
	rkisp_set_bits(dev->ispdev, MI_WR_CTRL, 0,
		       CIF_MI_CTRL_INIT_BASE_EN |
		       CIF_MI_CTRL_INIT_OFFSET_EN, true);
	/* unmask the frame-end interrupt for this bridge config */
	rkisp_set_bits(dev->ispdev, MI_IMSC, 0,
		       dev->cfg->frame_end_id, true);

	spin_lock_irqsave(&hw->buf_lock, lock_flags);
	hw->is_buf_init = true;
	spin_unlock_irqrestore(&hw->buf_lock, lock_flags);
	return 0;
err:
	/* drops the buf_init_cnt reference taken above */
	free_bridge_buf(dev);
	v4l2_err(&dev->sd, "%s fail:%d\n", __func__, ret);
	return ret;
}
189
/*
 * Configure the bridge working mode and size the shared buffers.
 *
 * Picture/gain sizes are derived from the max input (or current crop)
 * dimensions and the work mode flags; on success the buffer pool is
 * allocated via init_buf(). ISP_ISPP_INIT_FAIL is a teardown request
 * from the remote side. Returns 0 or a negative errno.
 */
static int config_mode(struct rkisp_bridge_device *dev)
{
	struct rkisp_hw_dev *hw = dev->ispdev->hw_dev;
	/* prefer the negotiated max input size over the current crop */
	u32 w = hw->max_in.w ? hw->max_in.w : dev->crop.width;
	u32 h = hw->max_in.h ? hw->max_in.h : dev->crop.height;
	u32 offs = w * h;
	u32 pic_size = 0, gain_size = 0;

	if (dev->work_mode == ISP_ISPP_INIT_FAIL) {
		free_bridge_buf(dev);
		return 0;
	}

	if (!dev->linked || !dev->ispdev->isp_inp) {
		v4l2_err(&dev->sd,
			 "invalid: link:%d or isp input:0x%x\n",
			 dev->linked,
			 dev->ispdev->isp_inp);
		return -EINVAL;
	}

	v4l2_dbg(1, rkisp_debug, &dev->sd,
		 "work mode:0x%x buf num:%d\n",
		 dev->work_mode, dev->buf_num);

	if (hw->isp_ver == ISP_V20) {
		/* V20 carries a gain map plus motion-detect timestamp blocks */
		gain_size = ALIGN(w, 64) * ALIGN(h, 128) >> 4;
		gain_size += RKISP_MOTION_DECT_TS_SIZE;
		pic_size += RKISP_MOTION_DECT_TS_SIZE;
		rkisp_bridge_init_ops_v20(dev);
	} else {
		/* V30 supports neither FBC output nor QUICK mode */
		dev->work_mode &= ~(ISP_ISPP_FBC | ISP_ISPP_QUICK);
		rkisp_bridge_init_ops_v30(dev);
	}

	if (dev->work_mode & ISP_ISPP_FBC) {
		/* FBC requires 16-aligned dims; header is 1/16 of the plane */
		w = ALIGN(w, 16);
		h = ALIGN(h, 16);
		offs = w * h >> 4;
		pic_size = offs;
	}
	/* 4:2:2 is 2 bytes/pixel, 4:2:0 is 1.5 bytes/pixel */
	if (dev->work_mode & ISP_ISPP_422)
		pic_size += w * h * 2;
	else
		pic_size += w * h * 3 >> 1;
	/* chroma plane offset from the luma base address */
	dev->cfg->offset = offs;

	return init_buf(dev, pic_size, gain_size);
}
239
/*
 * Start streaming on the bridge: validate the input, open the pipeline
 * (clocks/power), start the bridge hardware, then the upstream subdevs,
 * and finally claim the media pipeline. Error paths unwind strictly in
 * reverse order of the setup steps. Returns 0 or a negative errno.
 */
static int bridge_start_stream(struct v4l2_subdev *sd)
{
	struct rkisp_bridge_device *dev = v4l2_get_subdevdata(sd);
	int ret = -EINVAL;

	if (WARN_ON(dev->en))
		return -EBUSY;

	/* the isp->ispp path only carries YUV, never raw bayer */
	if (dev->ispdev->isp_sdev.out_fmt.fmt_type == FMT_BAYER) {
		v4l2_err(sd, "no support raw from isp to ispp\n");
		goto free_buf;
	}

	if (dev->ispdev->isp_inp & INP_CSI ||
	    dev->ispdev->isp_inp & INP_DVP ||
	    dev->ispdev->isp_inp & INP_LVDS ||
	    dev->ispdev->isp_inp & INP_CIF) {
		/* Always update sensor info in case media topology changed */
		ret = rkisp_update_sensor_info(dev->ispdev);
		if (ret < 0) {
			v4l2_err(sd, "update sensor info failed %d\n", ret);
			goto free_buf;
		}
	}

	/* enable clocks/power-domains */
	ret = dev->ispdev->pipe.open(&dev->ispdev->pipe, &sd->entity, true);
	if (ret < 0)
		goto free_buf;

	ret = dev->ops->start(dev);
	if (ret)
		goto close_pipe;

	/* start sub-devices */
	ret = dev->ispdev->pipe.set_stream(&dev->ispdev->pipe, true);
	if (ret < 0)
		goto stop_bridge;

	ret = media_pipeline_start(&sd->entity, &dev->ispdev->pipe.pipe);
	if (ret < 0)
		goto pipe_stream_off;

	return 0;
pipe_stream_off:
	dev->ispdev->pipe.set_stream(&dev->ispdev->pipe, false);
stop_bridge:
	dev->ops->stop(dev);
close_pipe:
	dev->ispdev->pipe.close(&dev->ispdev->pipe);
	hdr_destroy_buf(dev->ispdev);
free_buf:
	/* buffers were allocated earlier via config_mode(); drop our ref */
	free_bridge_buf(dev);
	v4l2_err(&dev->sd, "%s fail:%d\n", __func__, ret);
	return ret;
}
296
/* Drop the bridge buffer pool reference and release the HDR buffers. */
static void bridge_destroy_buf(struct rkisp_bridge_device *dev)
{
	free_bridge_buf(dev);
	hdr_destroy_buf(dev->ispdev);
}
302
/*
 * Stop streaming: halt the bridge hardware first, then release the media
 * pipeline, stop upstream subdevs, close the pipeline (clocks/power) and
 * free the buffers — the reverse of bridge_start_stream().
 */
static int bridge_stop_stream(struct v4l2_subdev *sd)
{
	struct rkisp_bridge_device *dev = v4l2_get_subdevdata(sd);

	dev->ops->stop(dev);
	media_pipeline_stop(&sd->entity);
	dev->ispdev->pipe.set_stream(&dev->ispdev->pipe, false);
	dev->ispdev->pipe.close(&dev->ispdev->pipe);
	bridge_destroy_buf(dev);
	return 0;
}
314
bridge_get_set_fmt(struct v4l2_subdev * sd,struct v4l2_subdev_pad_config * cfg,struct v4l2_subdev_format * fmt)315 static int bridge_get_set_fmt(struct v4l2_subdev *sd,
316 struct v4l2_subdev_pad_config *cfg,
317 struct v4l2_subdev_format *fmt)
318 {
319 struct rkisp_bridge_device *dev = v4l2_get_subdevdata(sd);
320
321 if (!fmt)
322 return -EINVAL;
323
324 /* get isp out format */
325 fmt->pad = RKISP_ISP_PAD_SOURCE_PATH;
326 fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
327 return v4l2_subdev_call(&dev->ispdev->isp_sdev.sd,
328 pad, get_fmt, NULL, fmt);
329 }
330
bridge_set_selection(struct v4l2_subdev * sd,struct v4l2_subdev_pad_config * cfg,struct v4l2_subdev_selection * sel)331 static int bridge_set_selection(struct v4l2_subdev *sd,
332 struct v4l2_subdev_pad_config *cfg,
333 struct v4l2_subdev_selection *sel)
334 {
335 struct rkisp_bridge_device *dev = v4l2_get_subdevdata(sd);
336 struct rkisp_isp_subdev *isp_sd = &dev->ispdev->isp_sdev;
337 u32 src_w = isp_sd->out_crop.width;
338 u32 src_h = isp_sd->out_crop.height;
339 struct v4l2_rect *crop;
340
341 if (!sel)
342 return -EINVAL;
343 if (sel->target != V4L2_SEL_TGT_CROP)
344 return -EINVAL;
345
346 crop = &sel->r;
347 crop->left = clamp_t(u32, crop->left, 0, src_w);
348 crop->top = clamp_t(u32, crop->top, 0, src_h);
349 crop->width = clamp_t(u32, crop->width,
350 CIF_ISP_OUTPUT_W_MIN, src_w - crop->left);
351 crop->height = clamp_t(u32, crop->height,
352 CIF_ISP_OUTPUT_H_MIN, src_h - crop->top);
353
354 dev->crop = *crop;
355 return 0;
356 }
357
bridge_get_selection(struct v4l2_subdev * sd,struct v4l2_subdev_pad_config * cfg,struct v4l2_subdev_selection * sel)358 static int bridge_get_selection(struct v4l2_subdev *sd,
359 struct v4l2_subdev_pad_config *cfg,
360 struct v4l2_subdev_selection *sel)
361 {
362 struct rkisp_bridge_device *dev = v4l2_get_subdevdata(sd);
363 struct rkisp_isp_subdev *isp_sd = &dev->ispdev->isp_sdev;
364 struct v4l2_rect *crop;
365
366 if (!sel)
367 return -EINVAL;
368
369 crop = &sel->r;
370 switch (sel->target) {
371 case V4L2_SEL_TGT_CROP_BOUNDS:
372 *crop = isp_sd->out_crop;
373 break;
374 case V4L2_SEL_TGT_CROP:
375 *crop = dev->crop;
376 break;
377 default:
378 return -EINVAL;
379 }
380
381 return 0;
382 }
383
bridge_s_rx_buffer(struct v4l2_subdev * sd,void * buf,unsigned int * size)384 static int bridge_s_rx_buffer(struct v4l2_subdev *sd,
385 void *buf, unsigned int *size)
386 {
387 struct rkisp_bridge_device *dev = v4l2_get_subdevdata(sd);
388 struct rkisp_hw_dev *hw = dev->ispdev->hw_dev;
389 struct rkisp_ispp_buf *dbufs = buf;
390 unsigned long lock_flags = 0;
391
392 spin_lock_irqsave(&hw->buf_lock, lock_flags);
393 /* size isn't using now */
394 if (!dbufs || !hw->buf_init_cnt) {
395 spin_unlock_irqrestore(&hw->buf_lock, lock_flags);
396 return -EINVAL;
397 }
398 list_add_tail(&dbufs->list, &hw->list);
399 spin_unlock_irqrestore(&hw->buf_lock, lock_flags);
400 return 0;
401 }
402
/*
 * s_stream entry point: serialize on the hw device lock, bump the capture
 * refcount and start, or stop (only if actually enabled) and drop it.
 */
static int bridge_s_stream(struct v4l2_subdev *sd, int on)
{
	struct rkisp_bridge_device *dev = v4l2_get_subdevdata(sd);
	struct rkisp_hw_dev *hw = dev->ispdev->hw_dev;
	int ret = 0;

	v4l2_dbg(1, rkisp_debug, sd,
		 "%s %d\n", __func__, on);

	mutex_lock(&hw->dev_lock);
	if (on) {
		/* reset per-stream debug counters */
		memset(&dev->dbg, 0, sizeof(dev->dbg));
		atomic_inc(&dev->ispdev->cap_dev.refcnt);
		ret = bridge_start_stream(sd);
		/*
		 * NOTE(review): on start failure refcnt stays elevated; it is
		 * only rebalanced if the caller issues s_stream(0) afterwards
		 * (the off path decrements unconditionally) — confirm callers
		 * guarantee that.
		 */
	} else {
		if (dev->en)
			ret = bridge_stop_stream(sd);
		atomic_dec(&dev->ispdev->cap_dev.refcnt);
	}
	mutex_unlock(&hw->dev_lock);

	return ret;
}
426
bridge_s_power(struct v4l2_subdev * sd,int on)427 static int bridge_s_power(struct v4l2_subdev *sd, int on)
428 {
429 int ret = 0;
430
431 v4l2_dbg(1, rkisp_debug, sd,
432 "%s %d\n", __func__, on);
433
434 if (on)
435 ret = v4l2_pipeline_pm_get(&sd->entity);
436 else
437 v4l2_pipeline_pm_put(&sd->entity);
438
439 return ret;
440 }
441
bridge_ioctl(struct v4l2_subdev * sd,unsigned int cmd,void * arg)442 static long bridge_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
443 {
444 struct rkisp_bridge_device *dev = v4l2_get_subdevdata(sd);
445 struct rkisp_ispp_mode *mode;
446 struct max_input *max_in;
447 long ret = 0;
448
449 switch (cmd) {
450 case RKISP_ISPP_CMD_SET_FMT:
451 max_in = arg;
452 dev->ispdev->hw_dev->max_in = *max_in;
453 break;
454 case RKISP_ISPP_CMD_SET_MODE:
455 mode = arg;
456 dev->work_mode = mode->work_mode;
457 dev->buf_num = mode->buf_num;
458 ret = config_mode(dev);
459 rkisp_chk_tb_over(dev->ispdev);
460 break;
461 default:
462 ret = -ENOIOCTLCMD;
463 }
464
465 return ret;
466 }
467
468 static const struct v4l2_subdev_pad_ops bridge_pad_ops = {
469 .set_fmt = bridge_get_set_fmt,
470 .get_fmt = bridge_get_set_fmt,
471 .get_selection = bridge_get_selection,
472 .set_selection = bridge_set_selection,
473 };
474
475 static const struct v4l2_subdev_video_ops bridge_video_ops = {
476 .s_rx_buffer = bridge_s_rx_buffer,
477 .s_stream = bridge_s_stream,
478 };
479
480 static const struct v4l2_subdev_core_ops bridge_core_ops = {
481 .s_power = bridge_s_power,
482 .ioctl = bridge_ioctl,
483 };
484
485 static struct v4l2_subdev_ops bridge_v4l2_ops = {
486 .core = &bridge_core_ops,
487 .video = &bridge_video_ops,
488 .pad = &bridge_pad_ops,
489 };
490
/*
 * Frame-start hook (non-QUICK mode only): record the frame-start time,
 * pull the next buffer off the shared free list if one is pending, then
 * let the version-specific ops reprogram the MI addresses.
 */
void rkisp_bridge_update_mi(struct rkisp_device *dev, u32 isp_mis)
{
	struct rkisp_bridge_device *br = &dev->br_dev;
	struct rkisp_hw_dev *hw = dev->hw_dev;
	unsigned long lock_flags = 0;

	/* only v20/v30, bridge enabled, not QUICK mode, not a frame-end irq */
	if ((dev->isp_ver != ISP_V20 && dev->isp_ver != ISP_V30) ||
	    !br->en || br->work_mode & ISP_ISPP_QUICK ||
	    isp_mis & CIF_ISP_FRAME)
		return;

	br->fs_ns = ktime_get_ns();
	spin_lock_irqsave(&hw->buf_lock, lock_flags);
	if (!hw->nxt_buf && !list_empty(&hw->list)) {
		hw->nxt_buf = list_first_entry(&hw->list,
					       struct rkisp_ispp_buf, list);
		list_del(&hw->nxt_buf->list);
	}
	spin_unlock_irqrestore(&hw->buf_lock, lock_flags);

	br->ops->update_mi(br);
}
513
/*
 * MI interrupt handler for the bridge: when this bridge's frame-end bit
 * is pending, clear it (both in *mis_val and in hardware), run the
 * version-specific frame-end handling and report the frame type to the
 * idle checker.
 */
void rkisp_bridge_isr(u32 *mis_val, struct rkisp_device *dev)
{
	struct rkisp_bridge_device *br = &dev->br_dev;
	void __iomem *base = dev->base_addr;
	u32 end_id, frame_type;

	if (!br->en || !br->cfg)
		return;

	end_id = br->cfg->frame_end_id;
	if (!(*mis_val & end_id))
		return;

	/* consume the bit from the status word and ack it in hardware */
	*mis_val &= ~end_id;
	writel(end_id, base + CIF_MI_ICR);

	frame_type = (end_id == MI_MPFBC_FRAME) ? ISP_FRAME_MPFBC : ISP_FRAME_MP;
	br->ops->frame_end(br, FRAME_IRQ);

	rkisp_check_idle(dev, frame_type);
}
537
check_remote_node(struct rkisp_device * ispdev)538 static int check_remote_node(struct rkisp_device *ispdev)
539 {
540 struct device *dev = ispdev->dev;
541 struct device_node *parent = dev->of_node;
542 struct device_node *remote = NULL;
543 int i, j;
544
545 for (i = 0; i < 3; i++) {
546 for (j = 0; j < 2; j++) {
547 remote = of_graph_get_remote_node(parent, i, j);
548 if (!remote)
549 continue;
550 of_node_put(remote);
551 if (strstr(of_node_full_name(remote), "ispp"))
552 return 0;
553 }
554 }
555
556 return -ENODEV;
557 }
558
/*
 * Register the bridge v4l2 subdev and link it to the ISP source pad.
 * The bridge only exists on ISP v20/v30 with an ispp remote node in the
 * device tree; otherwise this is a silent no-op returning 0.
 * Returns 0 on success or a negative errno.
 */
int rkisp_register_bridge_subdev(struct rkisp_device *dev,
				 struct v4l2_device *v4l2_dev)
{
	struct rkisp_bridge_device *bridge = &dev->br_dev;
	struct v4l2_subdev *sd;
	struct media_entity *source, *sink;
	int ret;

	memset(bridge, 0, sizeof(*bridge));
	if ((dev->isp_ver != ISP_V20 && dev->isp_ver != ISP_V30) ||
	    check_remote_node(dev) < 0)
		return 0;

	bridge->ispdev = dev;
	sd = &bridge->sd;
	v4l2_subdev_init(sd, &bridge_v4l2_ops);
	//sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	sd->entity.obj_type = 0;
	snprintf(sd->name, sizeof(sd->name), "%s", BRIDGE_DEV_NAME);
	bridge->pad.flags = MEDIA_PAD_FL_SINK;
	ret = media_entity_pads_init(&sd->entity, 1, &bridge->pad);
	if (ret < 0)
		return ret;
	sd->owner = THIS_MODULE;
	v4l2_set_subdevdata(sd, bridge);
	sd->grp_id = GRP_ID_ISP_BRIDGE;
	ret = v4l2_device_register_subdev(v4l2_dev, sd);
	if (ret < 0) {
		v4l2_err(sd, "Failed to register subdev\n");
		goto free_media;
	}
	bridge->crop = dev->isp_sdev.out_crop;
	/* bridge links */
	bridge->linked = true;
	source = &dev->isp_sdev.sd.entity;
	sink = &sd->entity;
	ret = media_create_pad_link(source, RKISP_ISP_PAD_SOURCE_PATH,
				    sink, 0, bridge->linked);
	/*
	 * NOTE(review): a media_create_pad_link() failure is returned but
	 * not unwound — the subdev stays registered and the workqueue and
	 * timer below are still set up; confirm callers tolerate this.
	 * alloc_workqueue() may also return NULL, which is not checked.
	 */
	init_waitqueue_head(&bridge->done);
	bridge->wq = alloc_workqueue("rkisp bridge workqueue",
				     WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	hrtimer_init(&bridge->frame_qst, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	return ret;

free_media:
	media_entity_cleanup(&sd->entity);
	return ret;
}
607
rkisp_unregister_bridge_subdev(struct rkisp_device * dev)608 void rkisp_unregister_bridge_subdev(struct rkisp_device *dev)
609 {
610 struct v4l2_subdev *sd = &dev->br_dev.sd;
611
612 if ((dev->isp_ver != ISP_V20 && dev->isp_ver != ISP_V30) ||
613 check_remote_node(dev) < 0)
614 return;
615 v4l2_device_unregister_subdev(sd);
616 media_entity_cleanup(&sd->entity);
617 }
618
rkisp_get_bridge_sd(struct platform_device * dev,struct v4l2_subdev ** sd)619 void rkisp_get_bridge_sd(struct platform_device *dev,
620 struct v4l2_subdev **sd)
621 {
622 struct rkisp_device *isp_dev = platform_get_drvdata(dev);
623
624 if (isp_dev)
625 *sd = &isp_dev->br_dev.sd;
626 else
627 *sd = NULL;
628 }
629 EXPORT_SYMBOL(rkisp_get_bridge_sd);
630