1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Rockchip CIF Driver
4  *
5  * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
6  */
7 #include <linux/clk.h>
8 #include <linux/delay.h>
9 #include <linux/interrupt.h>
10 #include <linux/module.h>
11 #include <linux/of.h>
12 #include <linux/of_gpio.h>
13 #include <linux/of_graph.h>
14 #include <linux/of_platform.h>
15 #include <linux/of_reserved_mem.h>
16 #include <linux/reset.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/pinctrl/consumer.h>
19 #include <linux/regmap.h>
20 #include <media/videobuf2-dma-contig.h>
21 #include <media/v4l2-fwnode.h>
22 #include <linux/iommu.h>
23 #include <dt-bindings/soc/rockchip-system-status.h>
24 #include <soc/rockchip/rockchip-system-status.h>
25 #include <linux/io.h>
26 #include <linux/mfd/syscon.h>
27 #include "dev.h"
28 #include "procfs.h"
29 
30 #define RKCIF_VERNO_LEN		10
31 
32 int rkcif_debug;
33 module_param_named(debug, rkcif_debug, int, 0644);
34 MODULE_PARM_DESC(debug, "Debug level (0-3)");
35 
36 static char rkcif_version[RKCIF_VERNO_LEN];
37 module_param_string(version, rkcif_version, RKCIF_VERNO_LEN, 0444);
38 MODULE_PARM_DESC(version, "version number");
39 
40 static DEFINE_MUTEX(rkcif_dev_mutex);
41 static LIST_HEAD(rkcif_device_list);
42 
43 /* show the compact mode of each stream in stream index order,
44  * 1 for compact, 0 for 16bit
45  */
46 static ssize_t rkcif_show_compact_mode(struct device *dev,
47 					      struct device_attribute *attr,
48 					      char *buf)
49 {
50 	struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
51 	int ret;
52 
53 	ret = snprintf(buf, PAGE_SIZE, "%d %d %d %d\n",
54 		       cif_dev->stream[0].is_compact ? 1 : 0,
55 		       cif_dev->stream[1].is_compact ? 1 : 0,
56 		       cif_dev->stream[2].is_compact ? 1 : 0,
57 		       cif_dev->stream[3].is_compact ? 1 : 0);
58 	return ret;
59 }
60 
61 static ssize_t rkcif_store_compact_mode(struct device *dev,
62 					       struct device_attribute *attr,
63 					       const char *buf, size_t len)
64 {
65 	struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
66 	int i, index;
67 	char val[4];
68 
69 	if (buf) {
70 		index = 0;
71 		for (i = 0; i < len; i++) {
72 			if (buf[i] == ' ') {
73 				continue;
74 			} else if (buf[i] == '\0') {
75 				break;
76 			} else {
77 				val[index] = buf[i];
78 				index++;
79 				if (index == 4)
80 					break;
81 			}
82 		}
83 
84 		for (i = 0; i < index; i++) {
85 			if (val[i] - '0' == 0)
86 				cif_dev->stream[i].is_compact = false;
87 			else
88 				cif_dev->stream[i].is_compact = true;
89 		}
90 	}
91 
92 	return len;
93 }
94 
95 static DEVICE_ATTR(compact_test, S_IWUSR | S_IRUSR,
96 		   rkcif_show_compact_mode, rkcif_store_compact_mode);
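
/*
 * Usage sketch for the compact_test attribute. The sysfs group is created on
 * the platform device in probe; the path below is a placeholder, the real
 * node name depends on the board's device tree:
 *
 *   # read per-stream packing (1 = compact raw, 0 = 16-bit storage)
 *   cat /sys/devices/platform/<rkcif-node>/compact_test
 *   # set stream 0/1 to compact, keep stream 2/3 at 16-bit
 *   echo "1 1 0 0" > /sys/devices/platform/<rkcif-node>/compact_test
 */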
97 
98 static ssize_t rkcif_show_line_int_num(struct device *dev,
99 					      struct device_attribute *attr,
100 					      char *buf)
101 {
102 	struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
103 	int ret;
104 
105 	ret = snprintf(buf, PAGE_SIZE, "%d\n",
106 		       cif_dev->wait_line_cache);
107 	return ret;
108 }
109 
110 static ssize_t rkcif_store_line_int_num(struct device *dev,
111 					       struct device_attribute *attr,
112 					       const char *buf, size_t len)
113 {
114 	struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
115 	struct sditf_priv *priv = cif_dev->sditf;
116 	int val = 0;
117 	int ret = 0;
118 
119 	if (priv->toisp_inf.link_mode != TOISP_NONE) {
120 		dev_info(cif_dev->dev,
121 			 "current mode is on the fly, wake up mode won't be used\n");
122 		return len;
123 	}
124 	ret = kstrtoint(buf, 0, &val);
125 	if (!ret && val >= 0 && val <= 0x3fff)
126 		cif_dev->wait_line_cache = val;
127 	else
128 		dev_info(cif_dev->dev, "set line int num failed\n");
129 	return len;
130 }
131 
132 static DEVICE_ATTR(wait_line, S_IWUSR | S_IRUSR,
133 		      rkcif_show_line_int_num, rkcif_store_line_int_num);
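
/*
 * Usage sketch for the wait_line attribute: accepted values are 0 to 0x3fff
 * and are stored in wait_line_cache; the write is ignored while the sditf
 * link runs in an on-the-fly (toisp) mode. The path is a placeholder:
 *
 *   echo 1088 > /sys/devices/platform/<rkcif-node>/wait_line
 *   cat /sys/devices/platform/<rkcif-node>/wait_line
 */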
134 
135 static ssize_t rkcif_show_dummybuf_mode(struct device *dev,
136 					      struct device_attribute *attr,
137 					      char *buf)
138 {
139 	struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
140 	int ret;
141 
142 	ret = snprintf(buf, PAGE_SIZE, "%d\n",
143 		       cif_dev->is_use_dummybuf);
144 	return ret;
145 }
146 
147 static ssize_t rkcif_store_dummybuf_mode(struct device *dev,
148 					       struct device_attribute *attr,
149 					       const char *buf, size_t len)
150 {
151 	struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
152 	int val = 0;
153 	int ret = 0;
154 
155 	ret = kstrtoint(buf, 0, &val);
156 	if (!ret) {
157 		if (val)
158 			cif_dev->is_use_dummybuf = true;
159 		else
160 			cif_dev->is_use_dummybuf = false;
161 	} else {
162 		dev_info(cif_dev->dev, "set dummy buf mode failed\n");
163 	}
164 	return len;
165 }
166 
167 static DEVICE_ATTR(is_use_dummybuf, S_IWUSR | S_IRUSR,
168 		      rkcif_show_dummybuf_mode, rkcif_store_dummybuf_mode);
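
/*
 * Usage sketch for is_use_dummybuf: any non-zero value enables the dummy
 * buffer, 0 disables it. The path is a placeholder:
 *
 *   echo 1 > /sys/devices/platform/<rkcif-node>/is_use_dummybuf
 */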
169 
170 /* show the memory mode of each stream in stream index order,
171  * 1 for high align, 0 for low align
172  */
173 static ssize_t rkcif_show_memory_mode(struct device *dev,
174 					      struct device_attribute *attr,
175 					      char *buf)
176 {
177 	struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
178 	int ret;
179 
180 	ret = snprintf(buf, PAGE_SIZE,
181 		       "stream[0~3] %d %d %d %d, 0(low align) 1(high align) 2(compact)\n",
182 		       cif_dev->stream[0].is_compact ? 2 : (cif_dev->stream[0].is_high_align ? 1 : 0),
183 		       cif_dev->stream[1].is_compact ? 2 : (cif_dev->stream[1].is_high_align ? 1 : 0),
184 		       cif_dev->stream[2].is_compact ? 2 : (cif_dev->stream[2].is_high_align ? 1 : 0),
185 		       cif_dev->stream[3].is_compact ? 2 : (cif_dev->stream[3].is_high_align ? 1 : 0));
186 	return ret;
187 }
188 
189 static ssize_t rkcif_store_memory_mode(struct device *dev,
190 					       struct device_attribute *attr,
191 					       const char *buf, size_t len)
192 {
193 	struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
194 	int i, index;
195 	char val[4];
196 
197 	if (buf) {
198 		index = 0;
199 		for (i = 0; i < len; i++) {
200 			if (buf[i] == ' ') {
201 				continue;
202 			} else if (buf[i] == '\0') {
203 				break;
204 			} else {
205 				val[index] = buf[i];
206 				index++;
207 				if (index == 4)
208 					break;
209 			}
210 		}
211 
212 		for (i = 0; i < index; i++) {
213 			if (cif_dev->stream[i].is_compact) {
214 				dev_info(cif_dev->dev, "stream[%d] set memory align failed: compact mode is enabled\n",
215 					 i);
216 				continue;
217 			}
218 			if (val[i] - '0' == 0)
219 				cif_dev->stream[i].is_high_align = false;
220 			else
221 				cif_dev->stream[i].is_high_align = true;
222 		}
223 	}
224 
225 	return len;
226 }
227 
228 static DEVICE_ATTR(is_high_align, S_IWUSR | S_IRUSR,
229 		   rkcif_show_memory_mode, rkcif_store_memory_mode);
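
/*
 * Usage sketch for is_high_align: one digit per stream, 0 = low align,
 * non-zero = high align; streams already in compact mode reject the change.
 * The path is a placeholder:
 *
 *   echo "1 1 1 1" > /sys/devices/platform/<rkcif-node>/is_high_align
 */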
230 
231 static ssize_t rkcif_show_scale_ch0_blc(struct device *dev,
232 					      struct device_attribute *attr,
233 					      char *buf)
234 {
235 	struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
236 	int ret;
237 
238 	ret = snprintf(buf, PAGE_SIZE, "ch0 pattern00: %d, pattern01: %d, pattern02: %d, pattern03: %d\n",
239 		       cif_dev->scale_vdev[0].blc.pattern00,
240 		       cif_dev->scale_vdev[0].blc.pattern01,
241 		       cif_dev->scale_vdev[0].blc.pattern02,
242 		       cif_dev->scale_vdev[0].blc.pattern03);
243 	return ret;
244 }
245 
246 static ssize_t rkcif_store_scale_ch0_blc(struct device *dev,
247 					       struct device_attribute *attr,
248 					       const char *buf, size_t len)
249 {
250 	struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
251 	int i = 0, index = 0;
252 	unsigned int val[4] = {0};
253 	unsigned int temp = 0;
254 	int ret = 0;
255 	int j = 0;
256 	char cha[2] = {0};
257 
258 	if (buf) {
259 		index = 0;
260 		for (i = 0; i < len; i++) {
261 			if (((buf[i] == ' ') || (buf[i] == '\n')) && j) {
262 				index++;
263 				j = 0;
264 				if (index == 4)
265 					break;
266 				continue;
267 			} else {
268 				if (buf[i] < '0' || buf[i] > '9')
269 					continue;
270 				cha[0] = buf[i];
271 				cha[1] = '\0';
272 				ret = kstrtoint(cha, 0, &temp);
273 				if (!ret) {
274 					if (j)
275 						val[index] *= 10;
276 					val[index] += temp;
277 					j++;
278 				}
279 			}
280 		}
281 		if (val[0] > 255 || val[1] > 255 || val[2] > 255 || val[3] > 255)
282 			return -EINVAL;
283 		cif_dev->scale_vdev[0].blc.pattern00 = val[0];
284 		cif_dev->scale_vdev[0].blc.pattern01 = val[1];
285 		cif_dev->scale_vdev[0].blc.pattern02 = val[2];
286 		cif_dev->scale_vdev[0].blc.pattern03 = val[3];
287 		dev_info(cif_dev->dev,
288 			 "set ch0 pattern00: %d, pattern01: %d, pattern02: %d, pattern03: %d\n",
289 			 cif_dev->scale_vdev[0].blc.pattern00,
290 			 cif_dev->scale_vdev[0].blc.pattern01,
291 			 cif_dev->scale_vdev[0].blc.pattern02,
292 			 cif_dev->scale_vdev[0].blc.pattern03);
293 	}
294 
295 	return len;
296 }
297 
298 static DEVICE_ATTR(scale_ch0_blc, S_IWUSR | S_IRUSR,
299 		   rkcif_show_scale_ch0_blc, rkcif_store_scale_ch0_blc);
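
/*
 * Usage sketch for the scale_chX_blc attributes: the store parser accepts up
 * to four decimal values (0-255) separated by spaces or newlines, mapped to
 * pattern00..pattern03 of that channel's black level. The path is a
 * placeholder:
 *
 *   echo "16 16 16 16" > /sys/devices/platform/<rkcif-node>/scale_ch0_blc
 *   cat /sys/devices/platform/<rkcif-node>/scale_ch0_blc
 */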
300 
301 static ssize_t rkcif_show_scale_ch1_blc(struct device *dev,
302 					      struct device_attribute *attr,
303 					      char *buf)
304 {
305 	struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
306 	int ret;
307 
308 	ret = snprintf(buf, PAGE_SIZE, "ch1 pattern00: %d, pattern01: %d, pattern02: %d, pattern03: %d\n",
309 		       cif_dev->scale_vdev[1].blc.pattern00,
310 		       cif_dev->scale_vdev[1].blc.pattern01,
311 		       cif_dev->scale_vdev[1].blc.pattern02,
312 		       cif_dev->scale_vdev[1].blc.pattern03);
313 	return ret;
314 }
315 
316 static ssize_t rkcif_store_scale_ch1_blc(struct device *dev,
317 					       struct device_attribute *attr,
318 					       const char *buf, size_t len)
319 {
320 	struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
321 	int i = 0, index = 0;
322 	unsigned int val[4] = {0};
323 	unsigned int temp = 0;
324 	int ret = 0;
325 	int j = 0;
326 	char cha[2] = {0};
327 
328 	if (buf) {
329 		index = 0;
330 		for (i = 0; i < len; i++) {
331 			if (((buf[i] == ' ') || (buf[i] == '\n')) && j) {
332 				index++;
333 				j = 0;
334 				if (index == 4)
335 					break;
336 				continue;
337 			} else {
338 				if (buf[i] < '0' || buf[i] > '9')
339 					continue;
340 				cha[0] = buf[i];
341 				cha[1] = '\0';
342 				ret = kstrtoint(cha, 0, &temp);
343 				if (!ret) {
344 					if (j)
345 						val[index] *= 10;
346 					val[index] += temp;
347 					j++;
348 				}
349 			}
350 		}
351 		if (val[0] > 255 || val[1] > 255 || val[2] > 255 || val[3] > 255)
352 			return -EINVAL;
353 
354 		cif_dev->scale_vdev[1].blc.pattern00 = val[0];
355 		cif_dev->scale_vdev[1].blc.pattern01 = val[1];
356 		cif_dev->scale_vdev[1].blc.pattern02 = val[2];
357 		cif_dev->scale_vdev[1].blc.pattern03 = val[3];
358 
359 		dev_info(cif_dev->dev,
360 			 "set ch1 pattern00: %d, pattern01: %d, pattern02: %d, pattern03: %d\n",
361 			 cif_dev->scale_vdev[1].blc.pattern00,
362 			 cif_dev->scale_vdev[1].blc.pattern01,
363 			 cif_dev->scale_vdev[1].blc.pattern02,
364 			 cif_dev->scale_vdev[1].blc.pattern03);
365 	}
366 
367 	return len;
368 }
369 
370 static DEVICE_ATTR(scale_ch1_blc, S_IWUSR | S_IRUSR,
371 		   rkcif_show_scale_ch1_blc, rkcif_store_scale_ch1_blc);
372 
373 static ssize_t rkcif_show_scale_ch2_blc(struct device *dev,
374 					      struct device_attribute *attr,
375 					      char *buf)
376 {
377 	struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
378 	int ret;
379 
380 	ret = snprintf(buf, PAGE_SIZE, "ch2 pattern00: %d, pattern01: %d, pattern02: %d, pattern03: %d\n",
381 		       cif_dev->scale_vdev[2].blc.pattern00,
382 		       cif_dev->scale_vdev[2].blc.pattern01,
383 		       cif_dev->scale_vdev[2].blc.pattern02,
384 		       cif_dev->scale_vdev[2].blc.pattern03);
385 	return ret;
386 }
387 
388 static ssize_t rkcif_store_scale_ch2_blc(struct device *dev,
389 					       struct device_attribute *attr,
390 					       const char *buf, size_t len)
391 {
392 	struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
393 	int i = 0, index = 0;
394 	unsigned int val[4] = {0};
395 	unsigned int temp = 0;
396 	int ret = 0;
397 	int j = 0;
398 	char cha[2] = {0};
399 
400 	if (buf) {
401 		index = 0;
402 		for (i = 0; i < len; i++) {
403 			if (((buf[i] == ' ') || (buf[i] == '\n')) && j) {
404 				index++;
405 				j = 0;
406 				if (index == 4)
407 					break;
408 				continue;
409 			} else {
410 				if (buf[i] < '0' || buf[i] > '9')
411 					continue;
412 				cha[0] = buf[i];
413 				cha[1] = '\0';
414 				ret = kstrtoint(cha, 0, &temp);
415 				if (!ret) {
416 					if (j)
417 						val[index] *= 10;
418 					val[index] += temp;
419 					j++;
420 				}
421 			}
422 		}
423 		if (val[0] > 255 || val[1] > 255 || val[2] > 255 || val[3] > 255)
424 			return -EINVAL;
425 
426 		cif_dev->scale_vdev[2].blc.pattern00 = val[0];
427 		cif_dev->scale_vdev[2].blc.pattern01 = val[1];
428 		cif_dev->scale_vdev[2].blc.pattern02 = val[2];
429 		cif_dev->scale_vdev[2].blc.pattern03 = val[3];
430 
431 		dev_info(cif_dev->dev,
432 			 "set ch2 pattern00: %d, pattern01: %d, pattern02: %d, pattern03: %d\n",
433 			 cif_dev->scale_vdev[2].blc.pattern00,
434 			 cif_dev->scale_vdev[2].blc.pattern01,
435 			 cif_dev->scale_vdev[2].blc.pattern02,
436 			 cif_dev->scale_vdev[2].blc.pattern03);
437 	}
438 
439 	return len;
440 }
441 static DEVICE_ATTR(scale_ch2_blc, S_IWUSR | S_IRUSR,
442 		   rkcif_show_scale_ch2_blc, rkcif_store_scale_ch2_blc);
443 
444 static ssize_t rkcif_show_scale_ch3_blc(struct device *dev,
445 					      struct device_attribute *attr,
446 					      char *buf)
447 {
448 	struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
449 	int ret;
450 
451 	ret = snprintf(buf, PAGE_SIZE, "ch3 pattern00: %d, pattern01: %d, pattern02: %d, pattern03: %d\n",
452 		       cif_dev->scale_vdev[3].blc.pattern00,
453 		       cif_dev->scale_vdev[3].blc.pattern01,
454 		       cif_dev->scale_vdev[3].blc.pattern02,
455 		       cif_dev->scale_vdev[3].blc.pattern03);
456 	return ret;
457 }
458 
459 static ssize_t rkcif_store_scale_ch3_blc(struct device *dev,
460 					       struct device_attribute *attr,
461 					       const char *buf, size_t len)
462 {
463 	struct rkcif_device *cif_dev = (struct rkcif_device *)dev_get_drvdata(dev);
464 	int i = 0, index = 0;
465 	unsigned int val[4] = {0};
466 	unsigned int temp = 0;
467 	int ret = 0;
468 	int j = 0;
469 	char cha[2] = {0};
470 
471 	if (buf) {
472 		index = 0;
473 		for (i = 0; i < len; i++) {
474 			if (((buf[i] == ' ') || (buf[i] == '\n')) && j) {
475 				index++;
476 				j = 0;
477 				if (index == 4)
478 					break;
479 				continue;
480 			} else {
481 				if (buf[i] < '0' || buf[i] > '9')
482 					continue;
483 				cha[0] = buf[i];
484 				cha[1] = '\0';
485 				ret = kstrtoint(cha, 0, &temp);
486 				if (!ret) {
487 					if (j)
488 						val[index] *= 10;
489 					val[index] += temp;
490 					j++;
491 				}
492 			}
493 		}
494 		if (val[0] > 255 || val[1] > 255 || val[2] > 255 || val[3] > 255)
495 			return -EINVAL;
496 
497 		cif_dev->scale_vdev[3].blc.pattern00 = val[0];
498 		cif_dev->scale_vdev[3].blc.pattern01 = val[1];
499 		cif_dev->scale_vdev[3].blc.pattern02 = val[2];
500 		cif_dev->scale_vdev[3].blc.pattern03 = val[3];
501 
502 		dev_info(cif_dev->dev,
503 			 "set ch3 pattern00: %d, pattern01: %d, pattern02: %d, pattern03: %d\n",
504 			 cif_dev->scale_vdev[3].blc.pattern00,
505 			 cif_dev->scale_vdev[3].blc.pattern01,
506 			 cif_dev->scale_vdev[3].blc.pattern02,
507 			 cif_dev->scale_vdev[3].blc.pattern03);
508 	}
509 
510 	return len;
511 }
512 
513 static DEVICE_ATTR(scale_ch3_blc, S_IWUSR | S_IRUSR,
514 		   rkcif_show_scale_ch3_blc, rkcif_store_scale_ch3_blc);
515 
516 static struct attribute *dev_attrs[] = {
517 	&dev_attr_compact_test.attr,
518 	&dev_attr_wait_line.attr,
519 	&dev_attr_is_use_dummybuf.attr,
520 	&dev_attr_is_high_align.attr,
521 	&dev_attr_scale_ch0_blc.attr,
522 	&dev_attr_scale_ch1_blc.attr,
523 	&dev_attr_scale_ch2_blc.attr,
524 	&dev_attr_scale_ch3_blc.attr,
525 	NULL,
526 };
527 
528 static struct attribute_group dev_attr_grp = {
529 	.attrs = dev_attrs,
530 };
531 
532 struct rkcif_match_data {
533 	int inf_id;
534 };
535 
536 void rkcif_write_register(struct rkcif_device *dev,
537 			  enum cif_reg_index index, u32 val)
538 {
539 	void __iomem *base = dev->hw_dev->base_addr;
540 	const struct cif_reg *reg = &dev->hw_dev->cif_regs[index];
541 	int csi_offset = 0;
542 
543 	if (dev->inf_id == RKCIF_MIPI_LVDS &&
544 	   dev->chip_id == CHIP_RK3588_CIF &&
545 	   index >= CIF_REG_MIPI_LVDS_ID0_CTRL0 &&
546 	   index <= CIF_REG_MIPI_ON_PAD)
547 		csi_offset = dev->csi_host_idx * 0x100;
548 	if (index < CIF_REG_INDEX_MAX) {
549 		if (index == CIF_REG_DVP_CTRL ||
550 		    (index != CIF_REG_DVP_CTRL && reg->offset != 0x0))
551 			write_cif_reg(base, reg->offset + csi_offset, val);
552 		else
553 			v4l2_dbg(1, rkcif_debug, &dev->v4l2_dev,
554 				 "write reg[%d]:0x%x failed, maybe useless!!!\n",
555 				 index, val);
556 	}
557 }
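
/*
 * Worked example of the csi_offset arithmetic above: on RK3588 in MIPI/LVDS
 * mode the register indexes from CIF_REG_MIPI_LVDS_ID0_CTRL0 up to
 * CIF_REG_MIPI_ON_PAD are shifted by csi_host_idx * 0x100, which implies the
 * per-ID registers are banked in 0x100 steps per CSI host. With
 * csi_host_idx == 2 a write to CIF_REG_MIPI_LVDS_ID0_CTRL0 therefore lands
 * at base + reg->offset + 0x200; all other registers keep csi_offset == 0.
 */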
558 
559 void rkcif_write_register_or(struct rkcif_device *dev,
560 			     enum cif_reg_index index, u32 val)
561 {
562 	unsigned int reg_val = 0x0;
563 	void __iomem *base = dev->hw_dev->base_addr;
564 	const struct cif_reg *reg = &dev->hw_dev->cif_regs[index];
565 	int csi_offset = 0;
566 
567 	if (dev->inf_id == RKCIF_MIPI_LVDS &&
568 	   dev->chip_id == CHIP_RK3588_CIF &&
569 	   index >= CIF_REG_MIPI_LVDS_ID0_CTRL0 &&
570 	   index <= CIF_REG_MIPI_ON_PAD)
571 		csi_offset = dev->csi_host_idx * 0x100;
572 
573 	if (index < CIF_REG_INDEX_MAX) {
574 		if (index == CIF_REG_DVP_CTRL ||
575 		    (index != CIF_REG_DVP_CTRL && reg->offset != 0x0)) {
576 			reg_val = read_cif_reg(base, reg->offset + csi_offset);
577 			reg_val |= val;
578 			write_cif_reg(base, reg->offset + csi_offset, reg_val);
579 		} else {
580 			v4l2_dbg(1, rkcif_debug, &dev->v4l2_dev,
581 				 "write reg[%d]:0x%x with OR failed, maybe useless!!!\n",
582 				 index, val);
583 		}
584 	}
585 }
586 
587 void rkcif_write_register_and(struct rkcif_device *dev,
588 			      enum cif_reg_index index, u32 val)
589 {
590 	unsigned int reg_val = 0x0;
591 	void __iomem *base = dev->hw_dev->base_addr;
592 	const struct cif_reg *reg = &dev->hw_dev->cif_regs[index];
593 	int csi_offset = 0;
594 
595 	if (dev->inf_id == RKCIF_MIPI_LVDS &&
596 	   dev->chip_id == CHIP_RK3588_CIF &&
597 	   index >= CIF_REG_MIPI_LVDS_ID0_CTRL0 &&
598 	   index <= CIF_REG_MIPI_ON_PAD)
599 		csi_offset = dev->csi_host_idx * 0x100;
600 
601 	if (index < CIF_REG_INDEX_MAX) {
602 		if (index == CIF_REG_DVP_CTRL ||
603 		    (index != CIF_REG_DVP_CTRL && reg->offset != 0x0)) {
604 			reg_val = read_cif_reg(base, reg->offset + csi_offset);
605 			reg_val &= val;
606 			write_cif_reg(base, reg->offset + csi_offset, reg_val);
607 		} else {
608 			v4l2_dbg(1, rkcif_debug, &dev->v4l2_dev,
609 				 "write reg[%d]:0x%x with AND failed, maybe useless!!!\n",
610 				 index, val);
611 		}
612 	}
613 }
614 
615 unsigned int rkcif_read_register(struct rkcif_device *dev,
616 				 enum cif_reg_index index)
617 {
618 	unsigned int val = 0x0;
619 	void __iomem *base = dev->hw_dev->base_addr;
620 	const struct cif_reg *reg = &dev->hw_dev->cif_regs[index];
621 	int csi_offset = 0;
622 
623 	if (dev->inf_id == RKCIF_MIPI_LVDS &&
624 	   dev->chip_id == CHIP_RK3588_CIF &&
625 	   index >= CIF_REG_MIPI_LVDS_ID0_CTRL0 &&
626 	   index <= CIF_REG_MIPI_ON_PAD)
627 		csi_offset = dev->csi_host_idx * 0x100;
628 
629 	if (index < CIF_REG_INDEX_MAX) {
630 		if (index == CIF_REG_DVP_CTRL ||
631 		    (index != CIF_REG_DVP_CTRL && reg->offset != 0x0))
632 			val = read_cif_reg(base, reg->offset + csi_offset);
633 		else
634 			v4l2_dbg(1, rkcif_debug, &dev->v4l2_dev,
635 				 "read reg[%d] failed, maybe useless!!!\n",
636 				 index);
637 	}
638 
639 	return val;
640 }
641 
642 void rkcif_write_grf_reg(struct rkcif_device *dev,
643 			 enum cif_reg_index index, u32 val)
644 {
645 	struct rkcif_hw *cif_hw = dev->hw_dev;
646 	const struct cif_reg *reg = &cif_hw->cif_regs[index];
647 
648 	if (index < CIF_REG_INDEX_MAX) {
649 		if (index > CIF_REG_DVP_CTRL) {
650 			if (!IS_ERR(cif_hw->grf))
651 				regmap_write(cif_hw->grf, reg->offset, val);
652 		} else {
653 			v4l2_dbg(1, rkcif_debug, &dev->v4l2_dev,
654 				 "write reg[%d]:0x%x failed, maybe useless!!!\n",
655 				 index, val);
656 		}
657 	}
658 }
659 
660 u32 rkcif_read_grf_reg(struct rkcif_device *dev, enum cif_reg_index index)
661 {
662 	struct rkcif_hw *cif_hw = dev->hw_dev;
663 	const struct cif_reg *reg = &cif_hw->cif_regs[index];
664 	u32 val = 0xffff;
665 
666 	if (index < CIF_REG_INDEX_MAX) {
667 		if (index > CIF_REG_DVP_CTRL) {
668 			if (!IS_ERR(cif_hw->grf))
669 				regmap_read(cif_hw->grf, reg->offset, &val);
670 		} else {
671 			v4l2_dbg(1, rkcif_debug, &dev->v4l2_dev,
672 				 "read reg[%d] failed, maybe useless!!!\n",
673 				 index);
674 		}
675 	}
676 
677 	return val;
678 }
679 
680 void rkcif_enable_dvp_clk_dual_edge(struct rkcif_device *dev, bool on)
681 {
682 	struct rkcif_hw *cif_hw = dev->hw_dev;
683 	u32 val = 0x0;
684 
685 	if (!IS_ERR(cif_hw->grf)) {
686 
687 		if (dev->chip_id == CHIP_RK3568_CIF) {
688 			if (on)
689 				val = RK3568_CIF_PCLK_DUAL_EDGE;
690 			else
691 				val = RK3568_CIF_PCLK_SINGLE_EDGE;
692 			rkcif_write_grf_reg(dev, CIF_REG_GRF_CIFIO_CON1, val);
693 		} else if (dev->chip_id == CHIP_RV1126_CIF) {
694 			if (on)
695 				val = CIF_SAMPLING_EDGE_DOUBLE;
696 			else
697 				val = CIF_SAMPLING_EDGE_SINGLE;
698 			rkcif_write_grf_reg(dev, CIF_REG_GRF_CIFIO_CON, val);
699 		} else if (dev->chip_id == CHIP_RK3588_CIF) {
700 			if (on)
701 				val = RK3588_CIF_PCLK_DUAL_EDGE;
702 			else
703 				val = RK3588_CIF_PCLK_SINGLE_EDGE;
704 			rkcif_write_grf_reg(dev, CIF_REG_GRF_CIFIO_CON, val);
705 		}
706 	}
707 
708 	v4l2_info(&dev->v4l2_dev,
709 		  "set dual edge mode(%s,0x%x)!!!\n", on ? "on" : "off", val);
710 }
711 
712 void rkcif_config_dvp_clk_sampling_edge(struct rkcif_device *dev,
713 					enum rkcif_clk_edge edge)
714 {
715 	struct rkcif_hw *cif_hw = dev->hw_dev;
716 	u32 val = 0x0;
717 
718 	if (!IS_ERR(cif_hw->grf)) {
719 		if (dev->chip_id == CHIP_RV1126_CIF) {
720 			if (edge == RKCIF_CLK_RISING)
721 				val = CIF_PCLK_SAMPLING_EDGE_RISING;
722 			else
723 				val = CIF_PCLK_SAMPLING_EDGE_FALLING;
724 		}
725 
726 		if (dev->chip_id == CHIP_RK3568_CIF) {
727 			if (edge == RKCIF_CLK_RISING)
728 				val = RK3568_CIF_PCLK_SAMPLING_EDGE_RISING;
729 			else
730 				val = RK3568_CIF_PCLK_SAMPLING_EDGE_FALLING;
731 		}
732 
733 		if (dev->chip_id == CHIP_RK3588_CIF) {
734 			if (edge == RKCIF_CLK_RISING)
735 				val = RK3588_CIF_PCLK_SAMPLING_EDGE_RISING;
736 			else
737 				val = RK3588_CIF_PCLK_SAMPLING_EDGE_FALLING;
738 		}
739 		rkcif_write_grf_reg(dev, CIF_REG_GRF_CIFIO_CON, val);
740 	}
741 }
742 
743 /**************************** pipeline operations *****************************/
744 static int __cif_pipeline_prepare(struct rkcif_pipeline *p,
745 				  struct media_entity *me)
746 {
747 	struct v4l2_subdev *sd;
748 	int i;
749 
750 	p->num_subdevs = 0;
751 	memset(p->subdevs, 0, sizeof(p->subdevs));
752 
753 	while (1) {
754 		struct media_pad *pad = NULL;
755 
756 		/* Find remote source pad */
757 		for (i = 0; i < me->num_pads; i++) {
758 			struct media_pad *spad = &me->pads[i];
759 
760 			if (!(spad->flags & MEDIA_PAD_FL_SINK))
761 				continue;
762 			pad = media_entity_remote_pad(spad);
763 			if (pad)
764 				break;
765 		}
766 
767 		if (!pad)
768 			break;
769 
770 		sd = media_entity_to_v4l2_subdev(pad->entity);
771 		p->subdevs[p->num_subdevs++] = sd;
772 		me = &sd->entity;
773 		if (me->num_pads == 1)
774 			break;
775 	}
776 
777 	return 0;
778 }
779 
780 static int __cif_pipeline_s_cif_clk(struct rkcif_pipeline *p)
781 {
782 	return 0;
783 }
784 
785 static int rkcif_pipeline_open(struct rkcif_pipeline *p,
786 			       struct media_entity *me,
787 				bool prepare)
788 {
789 	int ret;
790 
791 	if (WARN_ON(!p || !me))
792 		return -EINVAL;
793 	if (atomic_inc_return(&p->power_cnt) > 1)
794 		return 0;
795 
796 	/* go through the media graph and get subdevs */
797 	if (prepare)
798 		__cif_pipeline_prepare(p, me);
799 
800 	if (!p->num_subdevs)
801 		return -EINVAL;
802 
803 	ret = __cif_pipeline_s_cif_clk(p);
804 	if (ret < 0)
805 		return ret;
806 
807 	return 0;
808 }
809 
810 static int rkcif_pipeline_close(struct rkcif_pipeline *p)
811 {
812 	atomic_dec_return(&p->power_cnt);
813 
814 	return 0;
815 }
816 
817 static void rkcif_set_sensor_streamon_in_sync_mode(struct rkcif_device *cif_dev)
818 {
819 	struct rkcif_hw *hw = cif_dev->hw_dev;
820 	struct rkcif_device *dev = NULL;
821 	int i = 0;
822 	int on = 1;
823 	int ret = 0;
824 	bool is_streaming = false;
825 
826 	if (cif_dev->sync_type) {
827 		hw->sync_config.streaming_cnt++;
828 		if (hw->sync_config.streaming_cnt < hw->sync_config.dev_cnt)
829 			return;
830 	} else {
831 		return;
832 	}
833 
834 	if (hw->sync_config.mode == RKCIF_MASTER_MASTER ||
835 	    hw->sync_config.mode == RKCIF_MASTER_SLAVE) {
836 		for (i = 0; i < hw->sync_config.slave.count; i++) {
837 			dev = hw->sync_config.slave.cif_dev[i];
838 			is_streaming = hw->sync_config.slave.is_streaming[i];
839 			if (!is_streaming) {
840 				ret = v4l2_subdev_call(dev->terminal_sensor.sd, core, ioctl,
841 						       RKMODULE_SET_QUICK_STREAM, &on);
842 				if (ret)
843 					dev_info(dev->dev,
844 						 "set RKMODULE_SET_QUICK_STREAM failed\n");
845 				hw->sync_config.slave.is_streaming[i] = true;
846 			}
847 			v4l2_dbg(3, rkcif_debug, &dev->v4l2_dev,
848 				 "quick stream in sync mode, slave_dev[%d]\n", i);
849 
850 		}
851 		for (i = 0; i < hw->sync_config.ext_master.count; i++) {
852 			dev = hw->sync_config.ext_master.cif_dev[i];
853 			is_streaming = hw->sync_config.ext_master.is_streaming[i];
854 			if (!is_streaming) {
855 				ret = v4l2_subdev_call(dev->terminal_sensor.sd, core, ioctl,
856 						       RKMODULE_SET_QUICK_STREAM, &on);
857 				if (ret)
858 					dev_info(dev->dev,
859 						 "set RKMODULE_SET_QUICK_STREAM failed\n");
860 				hw->sync_config.ext_master.is_streaming[i] = true;
861 			}
862 			v4l2_dbg(3, rkcif_debug, &dev->v4l2_dev,
863 				 "quick stream in sync mode, ext_master_dev[%d]\n", i);
864 		}
865 		for (i = 0; i < hw->sync_config.int_master.count; i++) {
866 			dev = hw->sync_config.int_master.cif_dev[i];
867 			is_streaming = hw->sync_config.int_master.is_streaming[i];
868 			if (!is_streaming) {
869 				ret = v4l2_subdev_call(dev->terminal_sensor.sd, core, ioctl,
870 						       RKMODULE_SET_QUICK_STREAM, &on);
871 				if (ret)
872 					dev_info(hw->dev,
873 						 "set RKMODULE_SET_QUICK_STREAM failed\n");
874 				hw->sync_config.int_master.is_streaming[i] = true;
875 			}
876 			v4l2_dbg(3, rkcif_debug, &dev->v4l2_dev,
877 				 "quick stream in sync mode, int_master_dev[%d]\n", i);
878 		}
879 	}
880 }
881 
882 /*
883  * stream-on order: isp_subdev, mipi dphy, sensor
884  * stream-off order: mipi dphy, sensor, isp_subdev
885  */
886 static int rkcif_pipeline_set_stream(struct rkcif_pipeline *p, bool on)
887 {
888 	struct rkcif_device *cif_dev = container_of(p, struct rkcif_device, pipe);
889 	bool can_be_set = false;
890 	int i, ret;
891 
892 	if (cif_dev->hdr.hdr_mode == NO_HDR) {
893 		if ((on && atomic_inc_return(&p->stream_cnt) > 1) ||
894 		    (!on && atomic_dec_return(&p->stream_cnt) > 0))
895 			return 0;
896 
897 		if (on) {
898 			rockchip_set_system_status(SYS_STATUS_CIF0);
899 			cif_dev->irq_stats.csi_overflow_cnt = 0;
900 			cif_dev->irq_stats.csi_bwidth_lack_cnt = 0;
901 			cif_dev->irq_stats.dvp_bus_err_cnt = 0;
902 			cif_dev->irq_stats.dvp_line_err_cnt = 0;
903 			cif_dev->irq_stats.dvp_overflow_cnt = 0;
904 			cif_dev->irq_stats.dvp_pix_err_cnt = 0;
905 			cif_dev->irq_stats.all_err_cnt = 0;
906 			cif_dev->irq_stats.csi_size_err_cnt = 0;
907 			cif_dev->irq_stats.dvp_size_err_cnt = 0;
908 			cif_dev->irq_stats.dvp_bwidth_lack_cnt = 0;
909 			cif_dev->irq_stats.all_frm_end_cnt = 0;
910 			cif_dev->reset_watchdog_timer.is_triggered = false;
911 			cif_dev->reset_watchdog_timer.is_running = false;
912 			cif_dev->reset_watchdog_timer.last_buf_wakeup_cnt = 0;
913 			cif_dev->reset_watchdog_timer.run_cnt = 0;
914 			cif_dev->buf_wake_up_cnt = 0;
915 		}
916 
917 		/* phy -> sensor */
918 		for (i = 0; i < p->num_subdevs; i++) {
919 			ret = v4l2_subdev_call(p->subdevs[i], video, s_stream, on);
920 			if (on && ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
921 				goto err_stream_off;
922 		}
923 		if (on)
924 			rkcif_set_sensor_streamon_in_sync_mode(cif_dev);
925 	} else {
926 		if (!on && atomic_dec_return(&p->stream_cnt) > 0)
927 			return 0;
928 
929 		if (on) {
930 			atomic_inc(&p->stream_cnt);
931 			if (cif_dev->hdr.hdr_mode == HDR_X2) {
932 				if (atomic_read(&p->stream_cnt) == 1) {
933 					rockchip_set_system_status(SYS_STATUS_CIF0);
934 					can_be_set = false;
935 				} else if (atomic_read(&p->stream_cnt) == 2) {
936 					can_be_set = true;
937 				}
938 			} else if (cif_dev->hdr.hdr_mode == HDR_X3) {
939 				if (atomic_read(&p->stream_cnt) == 1) {
940 					rockchip_set_system_status(SYS_STATUS_CIF0);
941 					can_be_set = false;
942 				} else if (atomic_read(&p->stream_cnt) == 3) {
943 					can_be_set = true;
944 				}
945 			}
946 		}
947 
948 		if ((on && can_be_set) || !on) {
949 			if (on) {
950 				cif_dev->irq_stats.csi_overflow_cnt = 0;
951 				cif_dev->irq_stats.csi_bwidth_lack_cnt = 0;
952 				cif_dev->irq_stats.dvp_bus_err_cnt = 0;
953 				cif_dev->irq_stats.dvp_line_err_cnt = 0;
954 				cif_dev->irq_stats.dvp_overflow_cnt = 0;
955 				cif_dev->irq_stats.dvp_pix_err_cnt = 0;
956 				cif_dev->irq_stats.dvp_bwidth_lack_cnt = 0;
957 				cif_dev->irq_stats.all_err_cnt = 0;
958 				cif_dev->irq_stats.csi_size_err_cnt = 0;
959 				cif_dev->irq_stats.dvp_size_err_cnt = 0;
960 				cif_dev->irq_stats.all_frm_end_cnt = 0;
961 				cif_dev->is_start_hdr = true;
962 				cif_dev->reset_watchdog_timer.is_triggered = false;
963 				cif_dev->reset_watchdog_timer.is_running = false;
964 				cif_dev->reset_watchdog_timer.last_buf_wakeup_cnt = 0;
965 				cif_dev->reset_watchdog_timer.run_cnt = 0;
966 				cif_dev->buf_wake_up_cnt = 0;
967 			}
968 
969 			/* phy -> sensor */
970 			for (i = 0; i < p->num_subdevs; i++) {
971 				ret = v4l2_subdev_call(p->subdevs[i], video, s_stream, on);
972 
973 				if (on && ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
974 					goto err_stream_off;
975 			}
976 
977 			if (on)
978 				rkcif_set_sensor_streamon_in_sync_mode(cif_dev);
979 		}
980 	}
981 
982 	if (!on)
983 		rockchip_clear_system_status(SYS_STATUS_CIF0);
984 
985 	return 0;
986 
987 err_stream_off:
988 	for (--i; i >= 0; --i)
989 		v4l2_subdev_call(p->subdevs[i], video, s_stream, false);
990 	rockchip_clear_system_status(SYS_STATUS_CIF0);
991 	return ret;
992 }
993 
994 static int rkcif_create_link(struct rkcif_device *dev,
995 			     struct rkcif_sensor_info *sensor,
996 			     u32 stream_num,
997 			     bool *mipi_lvds_linked)
998 {
999 	struct rkcif_sensor_info linked_sensor;
1000 	struct media_entity *source_entity, *sink_entity;
1001 	int ret = 0;
1002 	u32 flags, pad, id;
1003 
1004 	linked_sensor.lanes = sensor->lanes;
1005 
1006 	if (sensor->mbus.type == V4L2_MBUS_CCP2) {
1007 		linked_sensor.sd = &dev->lvds_subdev.sd;
1008 		dev->lvds_subdev.sensor_self.sd = &dev->lvds_subdev.sd;
1009 		dev->lvds_subdev.sensor_self.lanes = sensor->lanes;
1010 		memcpy(&dev->lvds_subdev.sensor_self.mbus, &sensor->mbus,
1011 		       sizeof(struct v4l2_mbus_config));
1012 	} else {
1013 		linked_sensor.sd = sensor->sd;
1014 	}
1015 
1016 	memcpy(&linked_sensor.mbus, &sensor->mbus,
1017 	       sizeof(struct v4l2_mbus_config));
1018 
1019 	for (pad = 0; pad < linked_sensor.sd->entity.num_pads; pad++) {
1020 		if (linked_sensor.sd->entity.pads[pad].flags &
1021 		    MEDIA_PAD_FL_SOURCE) {
1022 			if (pad == linked_sensor.sd->entity.num_pads) {
1023 				dev_err(dev->dev,
1024 					"failed to find src pad for %s\n",
1025 					linked_sensor.sd->name);
1026 
1027 				break;
1028 			}
1029 
1030 			if ((linked_sensor.mbus.type == V4L2_MBUS_BT656 ||
1031 			     linked_sensor.mbus.type == V4L2_MBUS_PARALLEL) &&
1032 			    (dev->chip_id == CHIP_RK1808_CIF)) {
1033 				source_entity = &linked_sensor.sd->entity;
1034 				sink_entity = &dev->stream[RKCIF_STREAM_CIF].vnode.vdev.entity;
1035 
1036 				ret = media_create_pad_link(source_entity,
1037 							    pad,
1038 							    sink_entity,
1039 							    0,
1040 							    MEDIA_LNK_FL_ENABLED);
1041 				if (ret)
1042 					dev_err(dev->dev, "failed to create link for %s\n",
1043 						linked_sensor.sd->name);
1044 				break;
1045 			}
1046 
1047 			if ((linked_sensor.mbus.type == V4L2_MBUS_BT656 ||
1048 			     linked_sensor.mbus.type == V4L2_MBUS_PARALLEL) &&
1049 			    (dev->chip_id >= CHIP_RV1126_CIF)) {
1050 				source_entity = &linked_sensor.sd->entity;
1051 				sink_entity = &dev->stream[pad].vnode.vdev.entity;
1052 
1053 				ret = media_create_pad_link(source_entity,
1054 							    pad,
1055 							    sink_entity,
1056 							    0,
1057 							    MEDIA_LNK_FL_ENABLED);
1058 				if (ret)
1059 					dev_err(dev->dev, "failed to create link for %s pad[%d]\n",
1060 						linked_sensor.sd->name, pad);
1061 				continue;
1062 			}
1063 
1064 			for (id = 0; id < stream_num; id++) {
1065 				source_entity = &linked_sensor.sd->entity;
1066 				sink_entity = &dev->stream[id].vnode.vdev.entity;
1067 
1068 				if ((dev->chip_id < CHIP_RK1808_CIF) ||
1069 				    (id == pad - 1 && !(*mipi_lvds_linked)))
1070 					flags = MEDIA_LNK_FL_ENABLED;
1071 				else
1072 					flags = 0;
1073 
1074 				ret = media_create_pad_link(source_entity,
1075 							    pad,
1076 							    sink_entity,
1077 							    0,
1078 							    flags);
1079 				if (ret) {
1080 					dev_err(dev->dev,
1081 						"failed to create link for %s\n",
1082 						linked_sensor.sd->name);
1083 					break;
1084 				}
1085 			}
1086 			if (dev->chip_id == CHIP_RK3588_CIF) {
1087 				for (id = 0; id < stream_num; id++) {
1088 					source_entity = &linked_sensor.sd->entity;
1089 					sink_entity = &dev->scale_vdev[id].vnode.vdev.entity;
1090 
1091 					if ((id + stream_num) == pad - 1 && !(*mipi_lvds_linked))
1092 						flags = MEDIA_LNK_FL_ENABLED;
1093 					else
1094 						flags = 0;
1095 
1096 					ret = media_create_pad_link(source_entity,
1097 								    pad,
1098 								    sink_entity,
1099 								    0,
1100 								    flags);
1101 					if (ret) {
1102 						dev_err(dev->dev,
1103 							"failed to create link for %s\n",
1104 							linked_sensor.sd->name);
1105 						break;
1106 					}
1107 				}
1108 			}
1109 		}
1110 	}
1111 
1112 	if (sensor->mbus.type == V4L2_MBUS_CCP2) {
1113 		source_entity = &sensor->sd->entity;
1114 		sink_entity = &linked_sensor.sd->entity;
1115 		ret = media_create_pad_link(source_entity,
1116 					    1,
1117 					    sink_entity,
1118 					    0,
1119 					    MEDIA_LNK_FL_ENABLED);
1120 		if (ret)
1121 			dev_err(dev->dev, "failed to create link between %s and %s\n",
1122 				linked_sensor.sd->name,
1123 				sensor->sd->name);
1124 	}
1125 
1126 	if (linked_sensor.mbus.type != V4L2_MBUS_BT656 &&
1127 	    linked_sensor.mbus.type != V4L2_MBUS_PARALLEL)
1128 		*mipi_lvds_linked = true;
1129 	return ret;
1130 }
1131 
1132 /***************************** media controller *******************************/
1133 static int rkcif_create_links(struct rkcif_device *dev)
1134 {
1135 	u32 s = 0;
1136 	u32 stream_num = 0;
1137 	bool mipi_lvds_linked = false;
1138 
1139 	if (dev->chip_id < CHIP_RV1126_CIF) {
1140 		if (dev->inf_id == RKCIF_MIPI_LVDS)
1141 			stream_num = RKCIF_MAX_STREAM_MIPI;
1142 		else
1143 			stream_num = RKCIF_SINGLE_STREAM;
1144 	} else {
1145 		stream_num = RKCIF_MAX_STREAM_MIPI;
1146 	}
1147 
1148 	/* sensor links(or mipi-phy) */
1149 	for (s = 0; s < dev->num_sensors; ++s) {
1150 		struct rkcif_sensor_info *sensor = &dev->sensors[s];
1151 
1152 		rkcif_create_link(dev, sensor, stream_num, &mipi_lvds_linked);
1153 	}
1154 
1155 	return 0;
1156 }
1157 
1158 static int _set_pipeline_default_fmt(struct rkcif_device *dev)
1159 {
1160 	rkcif_set_default_fmt(dev);
1161 	return 0;
1162 }
1163 
1164 static int subdev_asyn_register_itf(struct rkcif_device *dev)
1165 {
1166 	struct sditf_priv *sditf = dev->sditf;
1167 	int ret = 0;
1168 
1169 	ret = rkcif_update_sensor_info(&dev->stream[0]);
1170 	if (ret) {
1171 		v4l2_err(&dev->v4l2_dev,
1172 			 "There is no terminal subdev, not synchronized with ISP\n");
1173 		return 0;
1174 	}
1175 	if (sditf)
1176 		ret = v4l2_async_register_subdev_sensor_common(&sditf->sd);
1177 
1178 	return ret;
1179 }
1180 
1181 static int subdev_notifier_complete(struct v4l2_async_notifier *notifier)
1182 {
1183 	struct rkcif_device *dev;
1184 	struct rkcif_sensor_info *sensor;
1185 	struct v4l2_subdev *sd;
1186 	struct v4l2_device *v4l2_dev = NULL;
1187 	int ret, index;
1188 
1189 	dev = container_of(notifier, struct rkcif_device, notifier);
1190 
1191 	v4l2_dev = &dev->v4l2_dev;
1192 
1193 	for (index = 0; index < dev->num_sensors; index++) {
1194 		sensor = &dev->sensors[index];
1195 
1196 		list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
1197 			if (sd->ops) {
1198 				if (sd == sensor->sd) {
1199 					ret = v4l2_subdev_call(sd,
1200 							       pad,
1201 							       get_mbus_config,
1202 							       0,
1203 							       &sensor->mbus);
1204 					if (ret)
1205 						v4l2_err(v4l2_dev,
1206 							 "get mbus config failed for linking\n");
1207 				}
1208 			}
1209 		}
1210 
1211 		if (sensor->mbus.type == V4L2_MBUS_CCP2 ||
1212 		    sensor->mbus.type == V4L2_MBUS_CSI2_DPHY ||
1213 		    sensor->mbus.type == V4L2_MBUS_CSI2_CPHY) {
1214 
1215 			switch (sensor->mbus.flags & V4L2_MBUS_CSI2_LANES) {
1216 			case V4L2_MBUS_CSI2_1_LANE:
1217 				sensor->lanes = 1;
1218 				break;
1219 			case V4L2_MBUS_CSI2_2_LANE:
1220 				sensor->lanes = 2;
1221 				break;
1222 			case V4L2_MBUS_CSI2_3_LANE:
1223 				sensor->lanes = 3;
1224 				break;
1225 			case V4L2_MBUS_CSI2_4_LANE:
1226 				sensor->lanes = 4;
1227 				break;
1228 			default:
1229 				sensor->lanes = 1;
1230 			}
1231 		}
1232 
1233 		if (sensor->mbus.type == V4L2_MBUS_CCP2) {
1234 			ret = rkcif_register_lvds_subdev(dev);
1235 			if (ret < 0) {
1236 				v4l2_err(&dev->v4l2_dev,
1237 					 "Err: register lvds subdev failed!!!\n");
1238 				goto notifier_end;
1239 			}
1240 			break;
1241 		}
1242 
1243 		if (sensor->mbus.type == V4L2_MBUS_PARALLEL ||
1244 		    sensor->mbus.type == V4L2_MBUS_BT656) {
1245 			ret = rkcif_register_dvp_sof_subdev(dev);
1246 			if (ret < 0) {
1247 				v4l2_err(&dev->v4l2_dev,
1248 					 "Err: register dvp sof subdev failed!!!\n");
1249 				goto notifier_end;
1250 			}
1251 			break;
1252 		}
1253 	}
1254 
1255 	ret = rkcif_create_links(dev);
1256 	if (ret < 0)
1257 		goto unregister_lvds;
1258 
1259 	ret = v4l2_device_register_subdev_nodes(&dev->v4l2_dev);
1260 	if (ret < 0)
1261 		goto unregister_lvds;
1262 
1263 	ret = _set_pipeline_default_fmt(dev);
1264 	if (ret < 0)
1265 		goto unregister_lvds;
1266 
1267 	v4l2_info(&dev->v4l2_dev, "Async subdev notifier completed\n");
1268 
1269 	return ret;
1270 
1271 unregister_lvds:
1272 	rkcif_unregister_lvds_subdev(dev);
1273 	rkcif_unregister_dvp_sof_subdev(dev);
1274 notifier_end:
1275 	return ret;
1276 }
1277 
1278 struct rkcif_async_subdev {
1279 	struct v4l2_async_subdev asd;
1280 	struct v4l2_mbus_config mbus;
1281 	int lanes;
1282 };
1283 
1284 static int subdev_notifier_bound(struct v4l2_async_notifier *notifier,
1285 				 struct v4l2_subdev *subdev,
1286 				 struct v4l2_async_subdev *asd)
1287 {
1288 	struct rkcif_device *cif_dev = container_of(notifier,
1289 					struct rkcif_device, notifier);
1290 	struct rkcif_async_subdev *s_asd = container_of(asd,
1291 					struct rkcif_async_subdev, asd);
1292 
1293 	if (cif_dev->num_sensors == ARRAY_SIZE(cif_dev->sensors)) {
1294 		v4l2_err(&cif_dev->v4l2_dev,
1295 			 "%s: the num of subdev is beyond %d\n",
1296 			 "%s: the number of subdevs exceeds %d\n",
1297 		return -EBUSY;
1298 	}
1299 
1300 	cif_dev->sensors[cif_dev->num_sensors].lanes = s_asd->lanes;
1301 	cif_dev->sensors[cif_dev->num_sensors].mbus = s_asd->mbus;
1302 	cif_dev->sensors[cif_dev->num_sensors].sd = subdev;
1303 	++cif_dev->num_sensors;
1304 
1305 	v4l2_err(subdev, "Async registered subdev\n");
1306 
1307 	return 0;
1308 }
1309 
1310 static int rkcif_fwnode_parse(struct device *dev,
1311 			      struct v4l2_fwnode_endpoint *vep,
1312 			      struct v4l2_async_subdev *asd)
1313 {
1314 	struct rkcif_async_subdev *rk_asd =
1315 			container_of(asd, struct rkcif_async_subdev, asd);
1316 	struct v4l2_fwnode_bus_parallel *bus = &vep->bus.parallel;
1317 
1318 	if (vep->bus_type != V4L2_MBUS_BT656 &&
1319 	    vep->bus_type != V4L2_MBUS_PARALLEL &&
1320 	    vep->bus_type != V4L2_MBUS_CSI2_DPHY &&
1321 	    vep->bus_type != V4L2_MBUS_CSI2_CPHY &&
1322 	    vep->bus_type != V4L2_MBUS_CCP2)
1323 		return 0;
1324 
1325 	rk_asd->mbus.type = vep->bus_type;
1326 
1327 	if (vep->bus_type == V4L2_MBUS_CSI2_DPHY ||
1328 	    vep->bus_type == V4L2_MBUS_CSI2_CPHY) {
1329 		rk_asd->mbus.flags = vep->bus.mipi_csi2.flags;
1330 		rk_asd->lanes = vep->bus.mipi_csi2.num_data_lanes;
1331 	} else if (vep->bus_type == V4L2_MBUS_CCP2) {
1332 		rk_asd->lanes = vep->bus.mipi_csi1.data_lane;
1333 	} else {
1334 		rk_asd->mbus.flags = bus->flags;
1335 	}
1336 
1337 	return 0;
1338 }
1339 
1340 static const struct v4l2_async_notifier_operations subdev_notifier_ops = {
1341 	.bound = subdev_notifier_bound,
1342 	.complete = subdev_notifier_complete,
1343 };
1344 
1345 static int cif_subdev_notifier(struct rkcif_device *cif_dev)
1346 {
1347 	struct v4l2_async_notifier *ntf = &cif_dev->notifier;
1348 	struct device *dev = cif_dev->dev;
1349 	int ret;
1350 
1351 	v4l2_async_notifier_init(ntf);
1352 
1353 	ret = v4l2_async_notifier_parse_fwnode_endpoints(
1354 		dev, ntf, sizeof(struct rkcif_async_subdev), rkcif_fwnode_parse);
1355 
1356 	if (ret < 0) {
1357 		v4l2_err(&cif_dev->v4l2_dev,
1358 			 "%s: parse fwnode failed\n", __func__);
1359 		return ret;
1360 	}
1361 
1362 	ntf->ops = &subdev_notifier_ops;
1363 
1364 	ret = v4l2_async_notifier_register(&cif_dev->v4l2_dev, ntf);
1365 
1366 	return ret;
1367 }
1368 
1369 /***************************** platform device *******************************/
1370 
1371 static int rkcif_register_platform_subdevs(struct rkcif_device *cif_dev)
1372 {
1373 	int stream_num = 0, ret;
1374 
1375 	if (cif_dev->chip_id < CHIP_RV1126_CIF) {
1376 		if (cif_dev->inf_id == RKCIF_MIPI_LVDS) {
1377 			stream_num = RKCIF_MAX_STREAM_MIPI;
1378 			ret = rkcif_register_stream_vdevs(cif_dev, stream_num,
1379 							  true);
1380 		} else {
1381 			stream_num = RKCIF_SINGLE_STREAM;
1382 			ret = rkcif_register_stream_vdevs(cif_dev, stream_num,
1383 							  false);
1384 		}
1385 	} else {
1386 		stream_num = RKCIF_MAX_STREAM_MIPI;
1387 		ret = rkcif_register_stream_vdevs(cif_dev, stream_num, true);
1388 	}
1389 
1390 	if (ret < 0) {
1391 		dev_err(cif_dev->dev, "cif register stream[%d] failed!\n", stream_num);
1392 		return -EINVAL;
1393 	}
1394 
1395 	if (cif_dev->chip_id == CHIP_RK3588_CIF) {
1396 		ret = rkcif_register_scale_vdevs(cif_dev, RKCIF_MAX_SCALE_CH, true);
1397 
1398 		if (ret < 0) {
1399 			dev_err(cif_dev->dev, "cif register scale_vdev[%d] failed!\n", stream_num);
1400 			goto err_unreg_stream_vdev;
1401 		}
1402 	}
1403 	ret = cif_subdev_notifier(cif_dev);
1404 	if (ret < 0) {
1405 		v4l2_err(&cif_dev->v4l2_dev,
1406 			 "Failed to register subdev notifier(%d)\n", ret);
1407 		goto err_unreg_stream_vdev;
1408 	}
1409 
1410 	return 0;
1411 err_unreg_stream_vdev:
1412 	rkcif_unregister_stream_vdevs(cif_dev, stream_num);
1413 	if (cif_dev->chip_id == CHIP_RK3588_CIF)
1414 		rkcif_unregister_scale_vdevs(cif_dev, RKCIF_MAX_SCALE_CH);
1415 
1416 	return ret;
1417 }
1418 
1419 static irqreturn_t rkcif_irq_handler(int irq, struct rkcif_device *cif_dev)
1420 {
1421 	if (cif_dev->workmode == RKCIF_WORKMODE_PINGPONG) {
1422 		if (cif_dev->chip_id < CHIP_RK3588_CIF)
1423 			rkcif_irq_pingpong(cif_dev);
1424 		else
1425 			rkcif_irq_pingpong_v1(cif_dev);
1426 	} else {
1427 		rkcif_irq_oneframe(cif_dev);
1428 	}
1429 	return IRQ_HANDLED;
1430 }
1431 
1432 static irqreturn_t rkcif_irq_lite_handler(int irq, struct rkcif_device *cif_dev)
1433 {
1434 	rkcif_irq_lite_lvds(cif_dev);
1435 
1436 	return IRQ_HANDLED;
1437 }
1438 
1439 void rkcif_soft_reset(struct rkcif_device *cif_dev, bool is_rst_iommu)
1440 {
1441 	struct rkcif_hw *hw_dev = cif_dev->hw_dev;
1442 	bool can_reset = true;
1443 	int i;
1444 
1445 	if (!cif_dev->hw_dev)
1446 		return;
1447 
1448 	for (i = 0; i < hw_dev->dev_num; i++)
1449 		if (atomic_read(&hw_dev->cif_dev[i]->pipe.stream_cnt) != 0) {
1450 			can_reset = false;
1451 			break;
1452 		}
1453 
1454 	if (can_reset)
1455 		rkcif_hw_soft_reset(cif_dev->hw_dev, is_rst_iommu);
1456 }
1457 
1458 int rkcif_attach_hw(struct rkcif_device *cif_dev)
1459 {
1460 	struct device_node *np;
1461 	struct platform_device *pdev;
1462 	struct rkcif_hw *hw;
1463 
1464 	if (cif_dev->hw_dev)
1465 		return 0;
1466 
1467 	cif_dev->chip_id = CHIP_RV1126_CIF_LITE;
1468 	np = of_parse_phandle(cif_dev->dev->of_node, "rockchip,hw", 0);
1469 	if (!np || !of_device_is_available(np)) {
1470 		dev_err(cif_dev->dev, "failed to get cif hw node\n");
1471 		return -ENODEV;
1472 	}
1473 
1474 	pdev = of_find_device_by_node(np);
1475 	of_node_put(np);
1476 	if (!pdev) {
1477 		dev_err(cif_dev->dev, "failed to get cif hw from node\n");
1478 		return -ENODEV;
1479 	}
1480 
1481 	hw = platform_get_drvdata(pdev);
1482 	if (!hw) {
1483 		dev_err(cif_dev->dev, "failed to attach cif hw\n");
1484 		return -EINVAL;
1485 	}
1486 
1487 	hw->cif_dev[hw->dev_num] = cif_dev;
1488 	hw->dev_num++;
1489 	cif_dev->hw_dev = hw;
1490 	cif_dev->chip_id = hw->chip_id;
1491 	dev_info(cif_dev->dev, "attach to cif hw node\n");
1492 
1493 	return 0;
1494 }
1495 
1496 static int rkcif_detach_hw(struct rkcif_device *cif_dev)
1497 {
1498 	struct rkcif_hw *hw = cif_dev->hw_dev;
1499 	int i;
1500 
1501 	for (i = 0; i < hw->dev_num; i++) {
1502 		if (hw->cif_dev[i] == cif_dev) {
1503 			if ((i + 1) < hw->dev_num) {
1504 				hw->cif_dev[i] = hw->cif_dev[i + 1];
1505 				hw->cif_dev[i + 1] = NULL;
1506 			} else {
1507 				hw->cif_dev[i] = NULL;
1508 			}
1509 
1510 			hw->dev_num--;
1511 			dev_info(cif_dev->dev, "detach from cif hw node\n");
1512 			break;
1513 		}
1514 	}
1515 
1516 	return 0;
1517 }
1518 
1519 static char *rkcif_get_monitor_mode(enum rkcif_monitor_mode mode)
1520 {
1521 	switch (mode) {
1522 	case RKCIF_MONITOR_MODE_IDLE:
1523 		return "idle";
1524 	case RKCIF_MONITOR_MODE_CONTINUE:
1525 		return "continue";
1526 	case RKCIF_MONITOR_MODE_TRIGGER:
1527 		return "trigger";
1528 	case RKCIF_MONITOR_MODE_HOTPLUG:
1529 		return "hotplug";
1530 	default:
1531 		return "unknown";
1532 	}
1533 }
1534 
1535 static void rkcif_init_reset_monitor(struct rkcif_device *dev)
1536 {
1537 	struct device_node *node = dev->dev->of_node;
1538 	struct rkcif_timer *timer = &dev->reset_watchdog_timer;
1539 	struct notifier_block *notifier = &dev->reset_notifier;
1540 	u32 para[8];
1541 	int i;
1542 
1543 	if (!of_property_read_u32_array(node,
1544 					OF_CIF_MONITOR_PARA,
1545 					para,
1546 					CIF_MONITOR_PARA_NUM)) {
1547 		for (i = 0; i < CIF_MONITOR_PARA_NUM; i++) {
1548 			if (i == 0) {
1549 				timer->monitor_mode = para[0];
1550 				v4l2_info(&dev->v4l2_dev,
1551 					  "%s: timer monitor mode:%s\n",
1552 					  __func__, rkcif_get_monitor_mode(timer->monitor_mode));
1553 			}
1554 
1555 			if (i == 1) {
1556 				timer->triggered_frame_num = para[1];
1557 				v4l2_info(&dev->v4l2_dev,
1558 					  "timer triggered frm num:%d\n",
1559 					  timer->triggered_frame_num);
1560 			}
1561 
1562 			if (i == 2) {
1563 				timer->frm_num_of_monitor_cycle = para[2];
1564 				v4l2_info(&dev->v4l2_dev,
1565 					  "timer frm num of monitor cycle:%d\n",
1566 					  timer->frm_num_of_monitor_cycle);
1567 			}
1568 
1569 			if (i == 3) {
1570 				timer->err_time_interval = para[3];
1571 				v4l2_info(&dev->v4l2_dev,
1572 					  "timer err time for keeping:%d ms\n",
1573 					  timer->err_time_interval);
1574 			}
1575 
1576 			if (i == 4) {
1577 				timer->csi2_err_ref_cnt = para[4];
1578 				v4l2_info(&dev->v4l2_dev,
1579 					  "timer csi2 err ref val for resetting:%d\n",
1580 					  timer->csi2_err_ref_cnt);
1581 			}
1582 		}
1583 	} else {
1584 		timer->monitor_mode = RKCIF_MONITOR_MODE_IDLE;
1585 		timer->err_time_interval = 0xffffffff;
1586 		timer->frm_num_of_monitor_cycle = 0xffffffff;
1587 		timer->triggered_frame_num =  0xffffffff;
1588 		timer->csi2_err_ref_cnt = 0xffffffff;
1589 	}
1590 
1591 	timer->is_running = false;
1592 	timer->is_triggered = false;
1593 	timer->is_buf_stop_update = false;
1594 	timer->csi2_err_cnt_even = 0;
1595 	timer->csi2_err_cnt_odd = 0;
1596 	timer->csi2_err_fs_fe_cnt = 0;
1597 	timer->csi2_err_fs_fe_detect_cnt = 0;
1598 	timer->csi2_err_triggered_cnt = 0;
1599 	timer->csi2_first_err_timestamp = 0;
1600 
1601 	timer_setup(&timer->timer, rkcif_reset_watchdog_timer_handler, 0);
1602 
1603 	notifier->priority = 1;
1604 	notifier->notifier_call = rkcif_reset_notifier;
1605 	rkcif_csi2_register_notifier(notifier);
1606 	INIT_WORK(&dev->reset_work.work, rkcif_reset_work);
1607 }
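
/*
 * The monitor parameters are read as a u32 array from the property named by
 * OF_CIF_MONITOR_PARA (defined in dev.h); the loop above consumes the first
 * five entries. A sketch of the expected device-tree layout, with a
 * placeholder property name:
 *
 *   rockchip,cif-monitor = <mode triggered_frm_num frm_per_cycle
 *                           err_time_ms csi2_err_ref_cnt>;
 */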
1608 
1609 int rkcif_plat_init(struct rkcif_device *cif_dev, struct device_node *node, int inf_id)
1610 {
1611 	struct device *dev = cif_dev->dev;
1612 	struct v4l2_device *v4l2_dev;
1613 	int ret;
1614 
1615 	cif_dev->hdr.hdr_mode = NO_HDR;
1616 	cif_dev->inf_id = inf_id;
1617 
1618 	mutex_init(&cif_dev->stream_lock);
1619 	mutex_init(&cif_dev->scale_lock);
1620 	spin_lock_init(&cif_dev->hdr_lock);
1621 	spin_lock_init(&cif_dev->reset_watchdog_timer.timer_lock);
1622 	spin_lock_init(&cif_dev->reset_watchdog_timer.csi2_err_lock);
1623 	atomic_set(&cif_dev->pipe.power_cnt, 0);
1624 	atomic_set(&cif_dev->pipe.stream_cnt, 0);
1625 	atomic_set(&cif_dev->fh_cnt, 0);
1626 	cif_dev->is_start_hdr = false;
1627 	cif_dev->pipe.open = rkcif_pipeline_open;
1628 	cif_dev->pipe.close = rkcif_pipeline_close;
1629 	cif_dev->pipe.set_stream = rkcif_pipeline_set_stream;
1630 	cif_dev->isr_hdl = rkcif_irq_handler;
1631 	cif_dev->id_use_cnt = 0;
1632 	cif_dev->sync_type = NO_SYNC_MODE;
1633 	if (cif_dev->chip_id == CHIP_RV1126_CIF_LITE)
1634 		cif_dev->isr_hdl = rkcif_irq_lite_handler;
1635 
1636 	if (cif_dev->chip_id < CHIP_RV1126_CIF) {
1637 		if (cif_dev->inf_id == RKCIF_MIPI_LVDS) {
1638 			rkcif_stream_init(cif_dev, RKCIF_STREAM_MIPI_ID0);
1639 			rkcif_stream_init(cif_dev, RKCIF_STREAM_MIPI_ID1);
1640 			rkcif_stream_init(cif_dev, RKCIF_STREAM_MIPI_ID2);
1641 			rkcif_stream_init(cif_dev, RKCIF_STREAM_MIPI_ID3);
1642 		} else {
1643 			rkcif_stream_init(cif_dev, RKCIF_STREAM_CIF);
1644 		}
1645 	} else {
1646 		/* for rv1126/rk356x, bt656/bt1120/mipi are multi channels */
1647 		rkcif_stream_init(cif_dev, RKCIF_STREAM_MIPI_ID0);
1648 		rkcif_stream_init(cif_dev, RKCIF_STREAM_MIPI_ID1);
1649 		rkcif_stream_init(cif_dev, RKCIF_STREAM_MIPI_ID2);
1650 		rkcif_stream_init(cif_dev, RKCIF_STREAM_MIPI_ID3);
1651 	}
1652 
1653 	if (cif_dev->chip_id == CHIP_RK3588_CIF) {
1654 		rkcif_init_scale_vdev(cif_dev, RKCIF_SCALE_CH0);
1655 		rkcif_init_scale_vdev(cif_dev, RKCIF_SCALE_CH1);
1656 		rkcif_init_scale_vdev(cif_dev, RKCIF_SCALE_CH2);
1657 		rkcif_init_scale_vdev(cif_dev, RKCIF_SCALE_CH3);
1658 	}
1659 
1660 #if defined(CONFIG_ROCKCHIP_CIF_WORKMODE_PINGPONG)
1661 	cif_dev->workmode = RKCIF_WORKMODE_PINGPONG;
1662 #elif defined(CONFIG_ROCKCHIP_CIF_WORKMODE_ONEFRAME)
1663 	cif_dev->workmode = RKCIF_WORKMODE_ONEFRAME;
1664 #else
1665 	cif_dev->workmode = RKCIF_WORKMODE_PINGPONG;
1666 #endif
1667 
1668 #if defined(CONFIG_ROCKCHIP_CIF_USE_DUMMY_BUF)
1669 	cif_dev->is_use_dummybuf = true;
1670 #else
1671 	cif_dev->is_use_dummybuf = false;
1672 #endif
1673 
1674 	strlcpy(cif_dev->media_dev.model, dev_name(dev),
1675 		sizeof(cif_dev->media_dev.model));
1676 	cif_dev->csi_host_idx = of_alias_get_id(node, "rkcif_mipi_lvds");
1677 	if (cif_dev->csi_host_idx < 0 || cif_dev->csi_host_idx > 5)
1678 		cif_dev->csi_host_idx = 0;
1679 	cif_dev->media_dev.dev = dev;
1680 	v4l2_dev = &cif_dev->v4l2_dev;
1681 	v4l2_dev->mdev = &cif_dev->media_dev;
1682 	strlcpy(v4l2_dev->name, dev_name(dev), sizeof(v4l2_dev->name));
1683 
1684 	ret = v4l2_device_register(cif_dev->dev, &cif_dev->v4l2_dev);
1685 	if (ret < 0)
1686 		return ret;
1687 
1688 	media_device_init(&cif_dev->media_dev);
1689 	ret = media_device_register(&cif_dev->media_dev);
1690 	if (ret < 0) {
1691 		v4l2_err(v4l2_dev, "Failed to register media device: %d\n",
1692 			 ret);
1693 		goto err_unreg_v4l2_dev;
1694 	}
1695 
1696 	/* create & register platform subdev (from of_node) */
1697 	ret = rkcif_register_platform_subdevs(cif_dev);
1698 	if (ret < 0)
1699 		goto err_unreg_media_dev;
1700 
1701 	if (cif_dev->chip_id == CHIP_RV1126_CIF ||
1702 	    cif_dev->chip_id == CHIP_RV1126_CIF_LITE ||
1703 	    cif_dev->chip_id == CHIP_RK3568_CIF)
1704 		rkcif_register_luma_vdev(&cif_dev->luma_vdev, v4l2_dev, cif_dev);
1705 
1706 	mutex_lock(&rkcif_dev_mutex);
1707 	list_add_tail(&cif_dev->list, &rkcif_device_list);
1708 	mutex_unlock(&rkcif_dev_mutex);
1709 
1710 	return 0;
1711 
1712 err_unreg_media_dev:
1713 	media_device_unregister(&cif_dev->media_dev);
1714 err_unreg_v4l2_dev:
1715 	v4l2_device_unregister(&cif_dev->v4l2_dev);
1716 	return ret;
1717 }
1718 
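/*
 * Teardown counterpart of rkcif_plat_init(). Note that active_sensor is only
 * set once a sensor has actually been bound, so it may still be NULL here if
 * the device was probed but never streamed.
 */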
int rkcif_plat_uninit(struct rkcif_device *cif_dev)
{
	int stream_num = 0;

	if (cif_dev->active_sensor) {
		if (cif_dev->active_sensor->mbus.type == V4L2_MBUS_CCP2)
			rkcif_unregister_lvds_subdev(cif_dev);

		if (cif_dev->active_sensor->mbus.type == V4L2_MBUS_BT656 ||
		    cif_dev->active_sensor->mbus.type == V4L2_MBUS_PARALLEL)
			rkcif_unregister_dvp_sof_subdev(cif_dev);
	}

	media_device_unregister(&cif_dev->media_dev);
	v4l2_device_unregister(&cif_dev->v4l2_dev);

	if (cif_dev->chip_id < CHIP_RV1126_CIF) {
		if (cif_dev->inf_id == RKCIF_MIPI_LVDS)
			stream_num = RKCIF_MAX_STREAM_MIPI;
		else
			stream_num = RKCIF_SINGLE_STREAM;
	} else {
		stream_num = RKCIF_MAX_STREAM_MIPI;
	}
	rkcif_unregister_stream_vdevs(cif_dev, stream_num);

	return 0;
}

static const struct rkcif_match_data rkcif_dvp_match_data = {
	.inf_id = RKCIF_DVP,
};

static const struct rkcif_match_data rkcif_mipi_lvds_match_data = {
	.inf_id = RKCIF_MIPI_LVDS,
};

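/*
 * The compatible string of the DT node selects the interface type: a node
 * with compatible = "rockchip,rkcif-dvp" is probed as RKCIF_DVP, while
 * "rockchip,rkcif-mipi-lvds" is probed as RKCIF_MIPI_LVDS. The remaining
 * properties of the node (clocks, resets, ports, ...) are defined by the
 * binding and not shown here.
 */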
static const struct of_device_id rkcif_plat_of_match[] = {
	{
		.compatible = "rockchip,rkcif-dvp",
		.data = &rkcif_dvp_match_data,
	},
	{
		.compatible = "rockchip,rkcif-mipi-lvds",
		.data = &rkcif_mipi_lvds_match_data,
	},
	{},
};

static void rkcif_parse_dts(struct rkcif_device *cif_dev)
{
	int ret = 0;
	struct device_node *node = cif_dev->dev->of_node;

	ret = of_property_read_u32(node,
			     OF_CIF_WAIT_LINE,
			     &cif_dev->wait_line);
	if (ret != 0)
		cif_dev->wait_line = 0;
	dev_info(cif_dev->dev, "rkcif wait line %d\n", cif_dev->wait_line);
}

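/*
 * Probe flow: match the DT node, allocate the per-interface device, register
 * the driver's sysfs attribute group, attach to the shared CIF hardware,
 * parse the optional DT properties, bring up the V4L2/media topology, then
 * set up procfs, the reset monitor, an initial soft reset and runtime PM.
 */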
static int rkcif_plat_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct rkcif_device *cif_dev;
	const struct rkcif_match_data *data;
	int ret;

	sprintf(rkcif_version, "v%02x.%02x.%02x",
		RKCIF_DRIVER_VERSION >> 16,
		(RKCIF_DRIVER_VERSION & 0xff00) >> 8,
		RKCIF_DRIVER_VERSION & 0x00ff);

	dev_info(dev, "rkcif driver version: %s\n", rkcif_version);

	/* of_match_node() returns NULL (not an ERR_PTR) when nothing matches */
	match = of_match_node(rkcif_plat_of_match, node);
	if (!match)
		return -ENODEV;
	data = match->data;

	cif_dev = devm_kzalloc(dev, sizeof(*cif_dev), GFP_KERNEL);
	if (!cif_dev)
		return -ENOMEM;

	dev_set_drvdata(dev, cif_dev);
	cif_dev->dev = dev;

	if (sysfs_create_group(&pdev->dev.kobj, &dev_attr_grp))
		return -ENODEV;

	rkcif_attach_hw(cif_dev);

	rkcif_parse_dts(cif_dev);

	ret = rkcif_plat_init(cif_dev, node, data->inf_id);
	if (ret) {
		rkcif_detach_hw(cif_dev);
		return ret;
	}

	if (rkcif_proc_init(cif_dev))
		dev_warn(dev, "dev:%s create proc failed\n", dev_name(dev));

	rkcif_init_reset_monitor(cif_dev);
	rkcif_soft_reset(cif_dev, false);
	pm_runtime_enable(&pdev->dev);

	return 0;
}

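/* Undo what rkcif_plat_probe() set up, in roughly the reverse order. */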
static int rkcif_plat_remove(struct platform_device *pdev)
{
	struct rkcif_device *cif_dev = platform_get_drvdata(pdev);

	/* balance pm_runtime_enable() from probe */
	pm_runtime_disable(&pdev->dev);
	rkcif_plat_uninit(cif_dev);
	rkcif_detach_hw(cif_dev);
	rkcif_proc_cleanup(cif_dev);
	rkcif_csi2_unregister_notifier(&cif_dev->reset_notifier);
	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
	del_timer_sync(&cif_dev->reset_watchdog_timer.timer);

	return 0;
}

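/*
 * Several CIF instances share one hardware block (cif_dev->hw_dev), so
 * hw_dev->power_cnt acts as a reference count: only the first resume and the
 * last suspend forward the request to the shared device's runtime PM.
 */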
static int __maybe_unused rkcif_runtime_suspend(struct device *dev)
{
	struct rkcif_device *cif_dev = dev_get_drvdata(dev);
	int ret = 0;

	if (atomic_dec_return(&cif_dev->hw_dev->power_cnt))
		return 0;

	mutex_lock(&cif_dev->hw_dev->dev_lock);
	ret = pm_runtime_put_sync(cif_dev->hw_dev->dev);
	mutex_unlock(&cif_dev->hw_dev->dev_lock);
	return (ret > 0) ? 0 : ret;
}

static int __maybe_unused rkcif_runtime_resume(struct device *dev)
{
	struct rkcif_device *cif_dev = dev_get_drvdata(dev);
	int ret = 0;

	if (atomic_inc_return(&cif_dev->hw_dev->power_cnt) > 1)
		return 0;
	mutex_lock(&cif_dev->hw_dev->dev_lock);
	ret = pm_runtime_resume_and_get(cif_dev->hw_dev->dev);
	mutex_unlock(&cif_dev->hw_dev->dev_lock);
	return (ret > 0) ? 0 : ret;
}

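/*
 * Sensors that never finish probing would otherwise leave the async notifier
 * incomplete and keep the rest of the video pipeline from registering. This
 * hook drops the unready sub-devices so the remaining ones can still be used.
 * It runs from late_initcall() for built-in kernels; for modular builds,
 * userspace is expected to trigger it by writing the write-only
 * "clr_unready_dev" module parameter once all sensor drivers have had a
 * chance to load.
 */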
static int __maybe_unused __rkcif_clr_unready_dev(void)
{
	struct rkcif_device *cif_dev;

	mutex_lock(&rkcif_dev_mutex);

	list_for_each_entry(cif_dev, &rkcif_device_list, list) {
		v4l2_async_notifier_clr_unready_dev(&cif_dev->notifier);
		subdev_asyn_register_itf(cif_dev);
	}

	mutex_unlock(&rkcif_dev_mutex);

	return 0;
}

static int rkcif_clr_unready_dev_param_set(const char *val, const struct kernel_param *kp)
{
#ifdef MODULE
	__rkcif_clr_unready_dev();
#endif

	return 0;
}

module_param_call(clr_unready_dev, rkcif_clr_unready_dev_param_set, NULL, NULL, 0200);
MODULE_PARM_DESC(clr_unready_dev, "clear unready devices");

#ifndef MODULE
static int __init rkcif_clr_unready_dev(void)
{
	__rkcif_clr_unready_dev();

	return 0;
}
late_initcall(rkcif_clr_unready_dev);
#endif

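/*
 * System sleep is funneled through runtime PM: pm_runtime_force_suspend()
 * and pm_runtime_force_resume() drive the runtime PM callbacks above when
 * the whole system suspends or resumes.
 */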
static const struct dev_pm_ops rkcif_plat_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(rkcif_runtime_suspend, rkcif_runtime_resume, NULL)
};

struct platform_driver rkcif_plat_drv = {
	.driver = {
		.name = CIF_DRIVER_NAME,
		.of_match_table = of_match_ptr(rkcif_plat_of_match),
		.pm = &rkcif_plat_pm_ops,
	},
	.probe = rkcif_plat_probe,
	.remove = rkcif_plat_remove,
};
EXPORT_SYMBOL(rkcif_plat_drv);

MODULE_AUTHOR("Rockchip Camera/ISP team");
MODULE_DESCRIPTION("Rockchip CIF platform driver");
MODULE_LICENSE("GPL v2");