// SPDX-License-Identifier: GPL-2.0
/*
 * Rockchip MIPI CSI2 Driver
 *
 * Copyright (C) 2019 Rockchip Electronics Co., Ltd.
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include "mipi-csi2.h"

static int csi2_debug;
module_param_named(debug_csi2, csi2_debug, int, 0644);
MODULE_PARM_DESC(debug_csi2, "Debug level (0-1)");

#define write_csihost_reg(base, addr, val) writel(val, (addr) + (base))
#define read_csihost_reg(base, addr) readl((addr) + (base))

static ATOMIC_NOTIFIER_HEAD(g_csi_host_chain);

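/*
 * Global notifier chain used to fan out CSI host error statistics to
 * interested listeners (presumably the rkcif capture driver); the ERR1
 * interrupt handler below calls this chain with a packed error counter.
 */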
int rkcif_csi2_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&g_csi_host_chain, nb);
}

int rkcif_csi2_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&g_csi_host_chain, nb);
}

static inline struct csi2_dev *sd_to_dev(struct v4l2_subdev *sdev)
{
	return container_of(sdev, struct csi2_dev, sd);
}

static struct csi2_sensor *sd_to_sensor(struct csi2_dev *csi2,
					struct v4l2_subdev *sd)
{
	int i;

	for (i = 0; i < csi2->num_sensors; ++i)
		if (csi2->sensors[i].sd == sd)
			return &csi2->sensors[i];

	return NULL;
}

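/*
 * Follow the media graph from the CSI2 sink pad to the remote entity
 * (the upstream sensor/dphy subdev) and return its v4l2_subdev, or NULL
 * if no link has been created yet.
 */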
static struct v4l2_subdev *get_remote_sensor(struct v4l2_subdev *sd)
{
	struct media_pad *local, *remote;
	struct media_entity *sensor_me;

	local = &sd->entity.pads[RK_CSI2_PAD_SINK];
	remote = media_entity_remote_pad(local);
	if (!remote) {
		v4l2_warn(sd, "No link between dphy and sensor\n");
		return NULL;
	}

	sensor_me = remote->entity;
	return media_entity_to_v4l2_subdev(sensor_me);
}

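/*
 * Query the connected sensor's media bus configuration and cache the
 * CSI-2 lane count in csi2->bus; the lane flags come straight from the
 * sensor's get_mbus_config pad op.
 */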
static void csi2_update_sensor_info(struct csi2_dev *csi2)
{
	struct csi2_sensor *sensor = &csi2->sensors[0];
	struct v4l2_mbus_config mbus;
	int ret = 0;

	ret = v4l2_subdev_call(sensor->sd, pad, get_mbus_config, 0, &mbus);
	if (ret) {
		v4l2_err(&csi2->sd, "update sensor info failed!\n");
		return;
	}

	csi2->bus.flags = mbus.flags;
	switch (csi2->bus.flags & V4L2_MBUS_CSI2_LANES) {
	case V4L2_MBUS_CSI2_1_LANE:
		csi2->bus.num_data_lanes = 1;
		break;
	case V4L2_MBUS_CSI2_2_LANE:
		csi2->bus.num_data_lanes = 2;
		break;
	case V4L2_MBUS_CSI2_3_LANE:
		csi2->bus.num_data_lanes = 3;
		break;
	case V4L2_MBUS_CSI2_4_LANE:
		csi2->bus.num_data_lanes = 4;
		break;
	default:
		v4l2_warn(&csi2->sd, "lane num is invalid\n");
		csi2->bus.num_data_lanes = 0;
		break;
	}
}

static void csi2_hw_do_reset(struct csi2_dev *csi2)
{
	reset_control_assert(csi2->rsts_bulk);

	udelay(5);

	reset_control_deassert(csi2->rsts_bulk);
}

static int csi2_enable_clks(struct csi2_dev *csi2)
{
	int ret = 0;

	ret = clk_bulk_prepare_enable(csi2->clks_num, csi2->clks_bulk);
	if (ret)
		dev_err(csi2->dev, "failed to enable clks\n");

	return ret;
}

static void csi2_disable_clks(struct csi2_dev *csi2)
{
	clk_bulk_disable_unprepare(csi2->clks_num, csi2->clks_bulk);
}

static void csi2_disable(struct csi2_dev *csi2)
{
	void __iomem *base = csi2->base;

	write_csihost_reg(base, CSIHOST_RESETN, 0);
	write_csihost_reg(base, CSIHOST_MSK1, 0xffffffff);
	write_csihost_reg(base, CSIHOST_MSK2, 0xffffffff);
}

static int csi2_g_mbus_config(struct v4l2_subdev *sd, unsigned int pad_id,
			      struct v4l2_mbus_config *mbus);

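/*
 * Program the CSI host: lane count, D-PHY vs C-PHY selection, the frame
 * and line start/end data type codes, and the interrupt masks, then take
 * the host out of reset. In DSI RX mode a number of error interrupts are
 * masked since they do not apply.
 */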
static void csi2_enable(struct csi2_dev *csi2,
			enum host_type_t host_type)
{
	void __iomem *base = csi2->base;
	int lanes = csi2->bus.num_data_lanes;
	struct v4l2_mbus_config mbus;
	u32 val = 0;

	csi2_g_mbus_config(&csi2->sd, 0, &mbus);
	if (mbus.type == V4L2_MBUS_CSI2_DPHY)
		val = SW_CPHY_EN(0);
	else if (mbus.type == V4L2_MBUS_CSI2_CPHY)
		val = SW_CPHY_EN(1);

	write_csihost_reg(base, CSIHOST_N_LANES, lanes - 1);

	if (host_type == RK_DSI_RXHOST) {
		val |= SW_DSI_EN(1) | SW_DATATYPE_FS(0x01) |
		       SW_DATATYPE_FE(0x11) | SW_DATATYPE_LS(0x21) |
		       SW_DATATYPE_LE(0x31);
		write_csihost_reg(base, CSIHOST_CONTROL, val);
		/* Disable some error interrupts when the host works in DSI RX mode */
		write_csihost_reg(base, CSIHOST_MSK1, 0xe00000f0);
		write_csihost_reg(base, CSIHOST_MSK2, 0xff00);
	} else {
		val |= SW_DSI_EN(0) | SW_DATATYPE_FS(0x0) |
		       SW_DATATYPE_FE(0x01) | SW_DATATYPE_LS(0x02) |
		       SW_DATATYPE_LE(0x03);
		write_csihost_reg(base, CSIHOST_CONTROL, val);
		write_csihost_reg(base, CSIHOST_MSK1, 0);
		write_csihost_reg(base, CSIHOST_MSK2, 0xf000);
	}

	write_csihost_reg(base, CSIHOST_RESETN, 1);
}

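/*
 * Full start sequence: reset the host, enable clocks, refresh the lane
 * configuration from the sensor, program the host (DSI RX mode is used
 * for RGB888 input), then ask the upstream subdev to start streaming.
 */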
static int csi2_start(struct csi2_dev *csi2)
{
	enum host_type_t host_type;
	int ret, i;

	atomic_set(&csi2->frm_sync_seq, 0);

	csi2_hw_do_reset(csi2);
	ret = csi2_enable_clks(csi2);
	if (ret) {
		v4l2_err(&csi2->sd, "%s: enable clks failed\n", __func__);
		return ret;
	}

	csi2_update_sensor_info(csi2);

	if (csi2->format_mbus.code == MEDIA_BUS_FMT_RGB888_1X24)
		host_type = RK_DSI_RXHOST;
	else
		host_type = RK_CSI_RXHOST;

	csi2_enable(csi2, host_type);

	pr_debug("stream sd: %s\n", csi2->src_sd->name);
	ret = v4l2_subdev_call(csi2->src_sd, video, s_stream, 1);
	ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
	if (ret)
		goto err_assert_reset;

	for (i = 0; i < RK_CSI2_ERR_MAX; i++)
		csi2->err_list[i].cnt = 0;

	return 0;

err_assert_reset:
	csi2_disable(csi2);
	csi2_disable_clks(csi2);

	return ret;
}

static void csi2_stop(struct csi2_dev *csi2)
{
	/* stop upstream */
	v4l2_subdev_call(csi2->src_sd, video, s_stream, 0);

	csi2_disable(csi2);
	csi2_hw_do_reset(csi2);
	csi2_disable_clks(csi2);
}

/*
 * V4L2 subdev operations.
 */

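/*
 * s_stream is reference counted: the hardware is only started on the
 * 0 -> 1 transition and stopped on the 1 -> 0 transition, so several
 * downstream users can share one CSI2 host instance.
 */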
static int csi2_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct csi2_dev *csi2 = sd_to_dev(sd);
	int ret = 0;

	mutex_lock(&csi2->lock);

	dev_dbg(csi2->dev, "stream %s, src_sd: %p, sd_name:%s\n",
		enable ? "on" : "off",
		csi2->src_sd, csi2->src_sd->name);

	/*
	 * enable/disable streaming only if stream_count is
	 * going from 0 to 1 / 1 to 0.
	 */
	if (csi2->stream_count != !enable)
		goto update_count;

	dev_dbg(csi2->dev, "stream %s\n", enable ? "ON" : "OFF");

	if (enable)
		ret = csi2_start(csi2);
	else
		csi2_stop(csi2);
	if (ret)
		goto out;

update_count:
	csi2->stream_count += enable ? 1 : -1;
	if (csi2->stream_count < 0)
		csi2->stream_count = 0;
out:
	mutex_unlock(&csi2->lock);

	return ret;
}

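/*
 * Track link state: only one upstream source may be connected to the
 * sink pad at a time, and each source pad remembers whether a sink is
 * linked so duplicate links are rejected with -EBUSY.
 */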
static int csi2_link_setup(struct media_entity *entity,
			   const struct media_pad *local,
			   const struct media_pad *remote, u32 flags)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct csi2_dev *csi2 = sd_to_dev(sd);
	struct v4l2_subdev *remote_sd;
	int ret = 0;

	remote_sd = media_entity_to_v4l2_subdev(remote->entity);

	mutex_lock(&csi2->lock);

	if (local->flags & MEDIA_PAD_FL_SOURCE) {
		if (flags & MEDIA_LNK_FL_ENABLED) {
			if (csi2->sink_linked[local->index - 1]) {
				ret = -EBUSY;
				goto out;
			}
			csi2->sink_linked[local->index - 1] = true;
		} else {
			csi2->sink_linked[local->index - 1] = false;
		}
	} else {
		if (flags & MEDIA_LNK_FL_ENABLED) {
			if (csi2->src_sd) {
				ret = -EBUSY;
				goto out;
			}
			csi2->src_sd = remote_sd;
		} else {
			csi2->src_sd = NULL;
		}
	}

out:
	mutex_unlock(&csi2->lock);
	return ret;
}

static int csi2_media_init(struct v4l2_subdev *sd)
{
	struct csi2_dev *csi2 = sd_to_dev(sd);
	int i = 0, num_pads = 0;

	num_pads = csi2->match_data->num_pads;

	for (i = 0; i < num_pads; i++) {
		csi2->pad[i].flags = (i == CSI2_SINK_PAD) ?
				     MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
	}

	csi2->pad[RK_CSI2X_PAD_SOURCE0].flags =
		MEDIA_PAD_FL_SOURCE | MEDIA_PAD_FL_MUST_CONNECT;
	csi2->pad[RK_CSI2_PAD_SINK].flags =
		MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;

	/* set a default mbus format */
	csi2->format_mbus.code = MEDIA_BUS_FMT_UYVY8_2X8;
	csi2->format_mbus.field = V4L2_FIELD_NONE;
	csi2->format_mbus.width = RKCIF_DEFAULT_WIDTH;
	csi2->format_mbus.height = RKCIF_DEFAULT_HEIGHT;
	csi2->crop.top = 0;
	csi2->crop.left = 0;
	csi2->crop.width = RKCIF_DEFAULT_WIDTH;
	csi2->crop.height = RKCIF_DEFAULT_HEIGHT;

	return media_entity_pads_init(&sd->entity, num_pads, csi2->pad);
}

/* csi2 accepts any fmt/size from the sensor */
static int csi2_get_set_fmt(struct v4l2_subdev *sd,
			    struct v4l2_subdev_pad_config *cfg,
			    struct v4l2_subdev_format *fmt)
{
	int ret;
	struct csi2_dev *csi2 = sd_to_dev(sd);
	struct v4l2_subdev *sensor = get_remote_sensor(sd);

	/*
	 * Do not allow format changes; just relay whatever is
	 * currently set in the sensor.
	 */
	ret = v4l2_subdev_call(sensor, pad, get_fmt, NULL, fmt);
	if (!ret)
		csi2->format_mbus = fmt->format;

	return ret;
}

static struct v4l2_rect *mipi_csi2_get_crop(struct csi2_dev *csi2,
					    struct v4l2_subdev_pad_config *cfg,
					    enum v4l2_subdev_format_whence which)
{
	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_crop(&csi2->sd, cfg, RK_CSI2_PAD_SINK);
	else
		return &csi2->crop;
}

static int csi2_get_selection(struct v4l2_subdev *sd,
			      struct v4l2_subdev_pad_config *cfg,
			      struct v4l2_subdev_selection *sel)
{
	struct csi2_dev *csi2 = sd_to_dev(sd);
	struct v4l2_subdev *sensor = get_remote_sensor(sd);
	struct v4l2_subdev_format fmt;
	int ret = 0;

	if (!sel) {
		v4l2_dbg(1, csi2_debug, &csi2->sd, "sel is null\n");
		goto err;
	}

	if (sel->pad > RK_CSI2X_PAD_SOURCE3) {
		v4l2_dbg(1, csi2_debug, &csi2->sd, "pad[%d] isn't matched\n", sel->pad);
		goto err;
	}

	switch (sel->target) {
	case V4L2_SEL_TGT_CROP_BOUNDS:
		if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
			sel->pad = 0;
			ret = v4l2_subdev_call(sensor, pad, get_selection,
					       cfg, sel);
			if (ret) {
				fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
				fmt.pad = 0;
				ret = v4l2_subdev_call(sensor, pad, get_fmt, NULL, &fmt);
				if (!ret) {
					csi2->format_mbus = fmt.format;
					sel->r.top = 0;
					sel->r.left = 0;
					sel->r.width = csi2->format_mbus.width;
					sel->r.height = csi2->format_mbus.height;
					csi2->crop = sel->r;
				} else {
					sel->r = csi2->crop;
				}
			} else {
				csi2->crop = sel->r;
			}
		} else {
			sel->r = *v4l2_subdev_get_try_crop(&csi2->sd, cfg, sel->pad);
		}
		break;

	case V4L2_SEL_TGT_CROP:
		sel->r = *mipi_csi2_get_crop(csi2, cfg, sel->which);
		break;

	default:
		return -EINVAL;
	}

	return 0;
err:
	return -EINVAL;
}

static int csi2_set_selection(struct v4l2_subdev *sd,
			      struct v4l2_subdev_pad_config *cfg,
			      struct v4l2_subdev_selection *sel)
{
	struct csi2_dev *csi2 = sd_to_dev(sd);
	struct v4l2_subdev *sensor = get_remote_sensor(sd);
	int ret = 0;

	ret = v4l2_subdev_call(sensor, pad, set_selection,
			       cfg, sel);
	if (!ret)
		csi2->crop = sel->r;

	return ret;
}

static int csi2_g_mbus_config(struct v4l2_subdev *sd, unsigned int pad_id,
			      struct v4l2_mbus_config *mbus)
{
	struct csi2_dev *csi2 = sd_to_dev(sd);
	struct v4l2_subdev *sensor_sd = get_remote_sensor(sd);
	int ret;

	ret = v4l2_subdev_call(sensor_sd, pad, get_mbus_config, 0, mbus);
	if (ret) {
		mbus->type = V4L2_MBUS_CSI2_DPHY;
		mbus->flags = csi2->bus.flags;
		mbus->flags |= BIT(csi2->bus.num_data_lanes - 1);
	}

	return 0;
}

static const struct media_entity_operations csi2_entity_ops = {
	.link_setup = csi2_link_setup,
	.link_validate = v4l2_subdev_link_validate,
};

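/*
 * Queue a V4L2_EVENT_FRAME_SYNC event on the subdev's device node with a
 * monotonically increasing frame sequence number; presumably called from
 * the capture driver's start-of-frame handling so userspace can track
 * frame timing.
 */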
void rkcif_csi2_event_inc_sof(struct csi2_dev *csi2_dev)
{
	if (csi2_dev) {
		struct v4l2_event event = {
			.type = V4L2_EVENT_FRAME_SYNC,
			.u.frame_sync.frame_sequence =
				atomic_inc_return(&csi2_dev->frm_sync_seq) - 1,
		};
		v4l2_event_queue(csi2_dev->sd.devnode, &event);
	}
}

u32 rkcif_csi2_get_sof(struct csi2_dev *csi2_dev)
{
	if (csi2_dev)
		return atomic_read(&csi2_dev->frm_sync_seq) - 1;

	return 0;
}

void rkcif_csi2_set_sof(struct csi2_dev *csi2_dev, u32 seq)
{
	if (csi2_dev)
		atomic_set(&csi2_dev->frm_sync_seq, seq);
}

static int rkcif_csi2_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				      struct v4l2_event_subscription *sub)
{
	if (sub->type != V4L2_EVENT_FRAME_SYNC)
		return -EINVAL;

	return v4l2_event_subscribe(fh, sub, 0, NULL);
}

static int rkcif_csi2_s_power(struct v4l2_subdev *sd, int on)
{
	return 0;
}

static const struct v4l2_subdev_core_ops csi2_core_ops = {
	.subscribe_event = rkcif_csi2_subscribe_event,
	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
	.s_power = rkcif_csi2_s_power,
};

static const struct v4l2_subdev_video_ops csi2_video_ops = {
	.s_stream = csi2_s_stream,
};

static const struct v4l2_subdev_pad_ops csi2_pad_ops = {
	.get_fmt = csi2_get_set_fmt,
	.set_fmt = csi2_get_set_fmt,
	.get_selection = csi2_get_selection,
	.set_selection = csi2_set_selection,
	.get_mbus_config = csi2_g_mbus_config,
};

static const struct v4l2_subdev_ops csi2_subdev_ops = {
	.core = &csi2_core_ops,
	.video = &csi2_video_ops,
	.pad = &csi2_pad_ops,
};

static int csi2_parse_endpoint(struct device *dev,
			       struct v4l2_fwnode_endpoint *vep,
			       struct v4l2_async_subdev *asd)
{
	struct v4l2_subdev *sd = dev_get_drvdata(dev);
	struct csi2_dev *csi2 = sd_to_dev(sd);

	if (vep->base.port != 0) {
		dev_err(dev, "The csi host node needs to parse port 0\n");
		return -EINVAL;
	}

	csi2->bus = vep->bus.mipi_csi2;

	return 0;
}

/* The .bound() notifier callback when a match is found */
static int
csi2_notifier_bound(struct v4l2_async_notifier *notifier,
		    struct v4l2_subdev *sd,
		    struct v4l2_async_subdev *asd)
{
	struct csi2_dev *csi2 = container_of(notifier,
					     struct csi2_dev,
					     notifier);
	struct csi2_sensor *sensor;
	struct media_link *link;
	unsigned int pad;
	int ret;

	if (csi2->num_sensors == ARRAY_SIZE(csi2->sensors)) {
		v4l2_err(&csi2->sd,
			 "%s: the number of subdevs exceeds the maximum: %d\n",
			 __func__, csi2->num_sensors);
		return -EBUSY;
	}
	sensor = &csi2->sensors[csi2->num_sensors++];
	sensor->sd = sd;

	for (pad = 0; pad < sd->entity.num_pads; pad++)
		if (sensor->sd->entity.pads[pad].flags
				& MEDIA_PAD_FL_SOURCE)
			break;

	if (pad == sensor->sd->entity.num_pads) {
		dev_err(csi2->dev,
			"failed to find src pad for %s\n",
			sd->name);

		return -ENXIO;
	}

	ret = media_create_pad_link(&sensor->sd->entity, pad,
				    &csi2->sd.entity, RK_CSI2_PAD_SINK,
				    0 /* csi2->num_sensors != 1 ? 0 : MEDIA_LNK_FL_ENABLED */);
	if (ret) {
		dev_err(csi2->dev,
			"failed to create link for %s\n",
			sd->name);
		return ret;
	}

	link = list_first_entry(&csi2->sd.entity.links, struct media_link, list);
	ret = media_entity_setup_link(link, MEDIA_LNK_FL_ENABLED);
	if (ret) {
		dev_err(csi2->dev,
			"failed to enable link for %s\n",
			sensor->sd->name);
		return ret;
	}

	return 0;
}

/* The .unbind callback */
static void csi2_notifier_unbind(struct v4l2_async_notifier *notifier,
				 struct v4l2_subdev *sd,
				 struct v4l2_async_subdev *asd)
{
	struct csi2_dev *csi2 = container_of(notifier,
					     struct csi2_dev,
					     notifier);
	struct csi2_sensor *sensor = sd_to_sensor(csi2, sd);

	sensor->sd = NULL;
}

static const struct
v4l2_async_notifier_operations csi2_async_ops = {
	.bound = csi2_notifier_bound,
	.unbind = csi2_notifier_unbind,
};

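/*
 * ERR1 interrupt: decode the error status bits, bump the per-type error
 * counters, and notify listeners on the global chain with a value that
 * packs the FS/FE mismatch count in bits [15:8] and the total error
 * count in bits [7:0].
 */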
static irqreturn_t rk_csirx_irq1_handler(int irq, void *ctx)
{
	struct device *dev = ctx;
	struct csi2_dev *csi2 = sd_to_dev(dev_get_drvdata(dev));
	struct csi2_err_stats *err_list = NULL;
	unsigned long err_stat = 0;
	u32 val;

	val = read_csihost_reg(csi2->base, CSIHOST_ERR1);
	if (val) {
		write_csihost_reg(csi2->base,
				  CSIHOST_ERR1, 0x0);

		if (val & CSIHOST_ERR1_PHYERR_SPTSYNCHS) {
			err_list = &csi2->err_list[RK_CSI2_ERR_SOTSYN];
			err_list->cnt++;
			v4l2_err(&csi2->sd,
				 "ERR1: start of transmission error (no synchronization achieved), reg: 0x%x, cnt:%d\n",
				 val, err_list->cnt);
		}

		if (val & CSIHOST_ERR1_ERR_BNDRY_MATCH) {
			err_list = &csi2->err_list[RK_CSI2_ERR_FS_FE_MIS];
			err_list->cnt++;
			v4l2_err(&csi2->sd,
				 "ERR1: error matching frame start with frame end, reg: 0x%x, cnt:%d\n",
				 val, err_list->cnt);
		}

		if (val & CSIHOST_ERR1_ERR_SEQ) {
			err_list = &csi2->err_list[RK_CSI2_ERR_FRM_SEQ_ERR];
			err_list->cnt++;
			v4l2_err(&csi2->sd,
				 "ERR1: incorrect frame sequence detected, reg: 0x%x, cnt:%d\n",
				 val, err_list->cnt);
		}

		if (val & CSIHOST_ERR1_ERR_FRM_DATA) {
			err_list = &csi2->err_list[RK_CSI2_ERR_CRC_ONCE];
			err_list->cnt++;
			v4l2_dbg(1, csi2_debug, &csi2->sd,
				 "ERR1: at least one crc error, reg: 0x%x, cnt:%d\n",
				 val, err_list->cnt);
		}

		if (val & CSIHOST_ERR1_ERR_CRC) {
			err_list = &csi2->err_list[RK_CSI2_ERR_CRC];
			err_list->cnt++;
			v4l2_err(&csi2->sd,
				 "ERR1: crc errors, reg: 0x%x, cnt:%d\n",
				 val, err_list->cnt);
		}

		csi2->err_list[RK_CSI2_ERR_ALL].cnt++;
		err_stat = ((csi2->err_list[RK_CSI2_ERR_FS_FE_MIS].cnt & 0xff) << 8) |
			   ((csi2->err_list[RK_CSI2_ERR_ALL].cnt) & 0xff);

		atomic_notifier_call_chain(&g_csi_host_chain,
					   err_stat,
					   NULL);
	}

	return IRQ_HANDLED;
}

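/*
 * ERR2 interrupt: report escape-mode, SoT, ECC, data-type and error-code
 * conditions. ECC-corrected headers are only logged at debug level since
 * the packet is still usable.
 */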
static irqreturn_t rk_csirx_irq2_handler(int irq, void *ctx)
{
	struct device *dev = ctx;
	struct csi2_dev *csi2 = sd_to_dev(dev_get_drvdata(dev));
	u32 val;

	val = read_csihost_reg(csi2->base, CSIHOST_ERR2);
	if (val) {
		if (val & CSIHOST_ERR2_PHYERR_ESC)
			v4l2_err(&csi2->sd, "ERR2: escape entry error (ULPM), reg: 0x%x\n", val);
		if (val & CSIHOST_ERR2_PHYERR_SOTHS)
			v4l2_err(&csi2->sd,
				 "ERR2: start of transmission error (synchronization can still be achieved), reg: 0x%x\n",
				 val);
		if (val & CSIHOST_ERR2_ECC_CORRECTED)
			v4l2_dbg(1, csi2_debug, &csi2->sd,
				 "ERR2: header error detected and corrected, reg: 0x%x\n",
				 val);
		if (val & CSIHOST_ERR2_ERR_ID)
			v4l2_err(&csi2->sd,
				 "ERR2: unrecognized or unimplemented data type detected, reg: 0x%x\n",
				 val);
		if (val & CSIHOST_ERR2_PHYERR_CODEHS)
			v4l2_err(&csi2->sd, "ERR2: received error code, reg: 0x%x\n", val);
	}

	return IRQ_HANDLED;
}

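/*
 * Parse the fwnode endpoints of port 0 (storing the MIPI CSI-2 bus
 * parameters via csi2_parse_endpoint) and register the async notifier so
 * the remote sensor subdev can be bound once it probes.
 */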
static int csi2_notifier(struct csi2_dev *csi2)
{
	struct v4l2_async_notifier *ntf = &csi2->notifier;
	int ret;

	v4l2_async_notifier_init(ntf);

	ret = v4l2_async_notifier_parse_fwnode_endpoints_by_port(csi2->dev,
								 &csi2->notifier,
								 sizeof(struct v4l2_async_subdev), 0,
								 csi2_parse_endpoint);
	if (ret < 0)
		return ret;

	csi2->sd.subdev_notifier = &csi2->notifier;
	csi2->notifier.ops = &csi2_async_ops;
	ret = v4l2_async_subdev_notifier_register(&csi2->sd, &csi2->notifier);
	if (ret) {
		v4l2_err(&csi2->sd,
			 "failed to register async notifier : %d\n",
			 ret);
		v4l2_async_notifier_cleanup(&csi2->notifier);
		return ret;
	}

	ret = v4l2_async_register_subdev(&csi2->sd);

	return ret;
}

static const struct csi2_match_data rk1808_csi2_match_data = {
	.chip_id = CHIP_RK1808_CSI2,
	.num_pads = CSI2_NUM_PADS,
};

static const struct csi2_match_data rk3288_csi2_match_data = {
	.chip_id = CHIP_RK3288_CSI2,
	.num_pads = CSI2_NUM_PADS_SINGLE_LINK,
};

static const struct csi2_match_data rv1126_csi2_match_data = {
	.chip_id = CHIP_RV1126_CSI2,
	.num_pads = CSI2_NUM_PADS,
};

static const struct csi2_match_data rk3568_csi2_match_data = {
	.chip_id = CHIP_RK3568_CSI2,
	.num_pads = CSI2_NUM_PADS,
};

static const struct csi2_match_data rk3588_csi2_match_data = {
	.chip_id = CHIP_RK3588_CSI2,
	.num_pads = CSI2_NUM_PADS_MAX,
};

static const struct of_device_id csi2_dt_ids[] = {
	{
		.compatible = "rockchip,rk1808-mipi-csi2",
		.data = &rk1808_csi2_match_data,
	},
	{
		.compatible = "rockchip,rk3288-mipi-csi2",
		.data = &rk3288_csi2_match_data,
	},
	{
		.compatible = "rockchip,rk3568-mipi-csi2",
		.data = &rk3568_csi2_match_data,
	},
	{
		.compatible = "rockchip,rv1126-mipi-csi2",
		.data = &rv1126_csi2_match_data,
	},
	{
		.compatible = "rockchip,rk3588-mipi-csi2",
		.data = &rk3588_csi2_match_data,
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, csi2_dt_ids);

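/*
 * Probe: match the per-SoC data, set up the v4l2 subdev and its pads,
 * grab clocks, resets, registers and the two error interrupts, then
 * register the async notifier. The devm_ioremap() fallback is kept for
 * the case where the register window is apparently shared with another
 * device and the exclusive resource request fails.
 */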
static int csi2_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	struct device_node *node = pdev->dev.of_node;
	struct csi2_dev *csi2 = NULL;
	struct resource *res;
	const struct csi2_match_data *data;
	int ret, irq;

	match = of_match_node(csi2_dt_ids, node);
	if (!match)
		return -ENODEV;
	data = match->data;

	csi2 = devm_kzalloc(&pdev->dev, sizeof(*csi2), GFP_KERNEL);
	if (!csi2)
		return -ENOMEM;

	csi2->dev = &pdev->dev;
	csi2->match_data = data;

	v4l2_subdev_init(&csi2->sd, &csi2_subdev_ops);
	v4l2_set_subdevdata(&csi2->sd, &pdev->dev);
	csi2->sd.entity.ops = &csi2_entity_ops;
	csi2->sd.dev = &pdev->dev;
	csi2->sd.owner = THIS_MODULE;
	csi2->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
	ret = strscpy(csi2->sd.name, DEVICE_NAME, sizeof(csi2->sd.name));
	if (ret < 0)
		v4l2_err(&csi2->sd, "failed to copy name\n");
	platform_set_drvdata(pdev, &csi2->sd);

	csi2->clks_num = devm_clk_bulk_get_all(dev, &csi2->clks_bulk);
	if (csi2->clks_num < 0)
		dev_err(dev, "failed to get csi2 clks\n");

	csi2->rsts_bulk = devm_reset_control_array_get_optional_exclusive(dev);
	if (IS_ERR(csi2->rsts_bulk)) {
		if (PTR_ERR(csi2->rsts_bulk) != -EPROBE_DEFER)
			dev_err(dev, "failed to get csi2 reset\n");
		return PTR_ERR(csi2->rsts_bulk);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	csi2->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(csi2->base)) {
		resource_size_t offset = res->start;
		resource_size_t size = resource_size(res);

		dev_warn(&pdev->dev,
			 "register region already claimed, falling back to plain ioremap\n");

		csi2->base = devm_ioremap(&pdev->dev, offset, size);
		if (!csi2->base) {
			dev_err(&pdev->dev, "Failed to ioremap resource\n");

			return -ENOMEM;
		}
	}

	irq = platform_get_irq_byname(pdev, "csi-intr1");
	if (irq > 0) {
		ret = devm_request_irq(&pdev->dev, irq,
				       rk_csirx_irq1_handler, 0,
				       dev_driver_string(&pdev->dev),
				       &pdev->dev);
		if (ret < 0)
			v4l2_err(&csi2->sd, "request csi-intr1 irq failed: %d\n",
				 ret);
	} else {
		v4l2_err(&csi2->sd, "failed to get irq csi-intr1\n");
	}

	irq = platform_get_irq_byname(pdev, "csi-intr2");
	if (irq > 0) {
		ret = devm_request_irq(&pdev->dev, irq,
				       rk_csirx_irq2_handler, 0,
				       dev_driver_string(&pdev->dev),
				       &pdev->dev);
		if (ret < 0)
			v4l2_err(&csi2->sd, "request csi-intr2 failed: %d\n",
				 ret);
	} else {
		v4l2_err(&csi2->sd, "failed to get irq csi-intr2\n");
	}

	mutex_init(&csi2->lock);

	ret = csi2_media_init(&csi2->sd);
	if (ret < 0)
		goto rmmutex;
	ret = csi2_notifier(csi2);
	if (ret)
		goto rmmutex;

	csi2_hw_do_reset(csi2);

	v4l2_info(&csi2->sd, "probe success, v4l2_dev:%s!\n", csi2->sd.v4l2_dev->name);

	return 0;

rmmutex:
	mutex_destroy(&csi2->lock);
	return ret;
}

static int csi2_remove(struct platform_device *pdev)
{
	struct v4l2_subdev *sd = platform_get_drvdata(pdev);
	struct csi2_dev *csi2 = sd_to_dev(sd);

	v4l2_async_unregister_subdev(sd);
	mutex_destroy(&csi2->lock);
	media_entity_cleanup(&sd->entity);

	return 0;
}

static struct platform_driver csi2_driver = {
	.driver = {
		.name = DEVICE_NAME,
		.of_match_table = csi2_dt_ids,
	},
	.probe = csi2_probe,
	.remove = csi2_remove,
};

int __init rkcif_csi2_plat_drv_init(void)
{
	return platform_driver_register(&csi2_driver);
}

void __exit rkcif_csi2_plat_drv_exit(void)
{
	platform_driver_unregister(&csi2_driver);
}

MODULE_DESCRIPTION("Rockchip MIPI CSI2 driver");
MODULE_AUTHOR("Macrofly.xu <xuhf@rock-chips.com>");
MODULE_LICENSE("GPL");