• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
3 // Copyright (c) 2017-2022 Linaro Limited.
4 
5 #include <linux/clk.h>
6 #include <linux/completion.h>
7 #include <linux/i2c.h>
8 #include <linux/io.h>
9 #include <linux/interrupt.h>
10 #include <linux/module.h>
11 #include <linux/of.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_runtime.h>
14 
/* CCI register map */
#define CCI_HW_VERSION				0x0
#define CCI_RESET_CMD				0x004
#define CCI_RESET_CMD_MASK			0x0f73f3f7
#define CCI_RESET_CMD_M0_MASK			0x000003f1
#define CCI_RESET_CMD_M1_MASK			0x0003f001
#define CCI_QUEUE_START				0x008
#define CCI_HALT_REQ				0x034
#define CCI_HALT_REQ_I2C_M0_Q0Q1		BIT(0)
#define CCI_HALT_REQ_I2C_M1_Q0Q1		BIT(1)

/* Per-master registers; m is the master index */
#define CCI_I2C_Mm_SCL_CTL(m)			(0x100 + 0x100 * (m))
#define CCI_I2C_Mm_SDA_CTL_0(m)			(0x104 + 0x100 * (m))
#define CCI_I2C_Mm_SDA_CTL_1(m)			(0x108 + 0x100 * (m))
#define CCI_I2C_Mm_SDA_CTL_2(m)			(0x10c + 0x100 * (m))
#define CCI_I2C_Mm_MISC_CTL(m)			(0x110 + 0x100 * (m))

#define CCI_I2C_Mm_READ_DATA(m)			(0x118 + 0x100 * (m))
#define CCI_I2C_Mm_READ_BUF_LEVEL(m)		(0x11c + 0x100 * (m))

/* Per-master, per-queue registers; n is the queue index */
#define CCI_I2C_Mm_Qn_EXEC_WORD_CNT(m, n)	(0x300 + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_CUR_WORD_CNT(m, n)	(0x304 + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_CUR_CMD(m, n)		(0x308 + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_REPORT_STATUS(m, n)	(0x30c + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_LOAD_DATA(m, n)		(0x310 + 0x200 * (m) + 0x100 * (n))

/* Interrupt mask / clear / status registers and their bit layout */
#define CCI_IRQ_GLOBAL_CLEAR_CMD		0xc00
#define CCI_IRQ_MASK_0				0xc04
#define CCI_IRQ_MASK_0_I2C_M0_RD_DONE		BIT(0)
#define CCI_IRQ_MASK_0_I2C_M0_Q0_REPORT		BIT(4)
#define CCI_IRQ_MASK_0_I2C_M0_Q1_REPORT		BIT(8)
#define CCI_IRQ_MASK_0_I2C_M1_RD_DONE		BIT(12)
#define CCI_IRQ_MASK_0_I2C_M1_Q0_REPORT		BIT(16)
#define CCI_IRQ_MASK_0_I2C_M1_Q1_REPORT		BIT(20)
#define CCI_IRQ_MASK_0_RST_DONE_ACK		BIT(24)
#define CCI_IRQ_MASK_0_I2C_M0_Q0Q1_HALT_ACK	BIT(25)
#define CCI_IRQ_MASK_0_I2C_M1_Q0Q1_HALT_ACK	BIT(26)
#define CCI_IRQ_MASK_0_I2C_M0_ERROR		0x18000ee6
#define CCI_IRQ_MASK_0_I2C_M1_ERROR		0x60ee6000
#define CCI_IRQ_CLEAR_0				0xc08
#define CCI_IRQ_STATUS_0			0xc0c
#define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE		BIT(0)
#define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT	BIT(4)
#define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT	BIT(8)
#define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE		BIT(12)
#define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT	BIT(16)
#define CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT	BIT(20)
#define CCI_IRQ_STATUS_0_RST_DONE_ACK		BIT(24)
#define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK	BIT(25)
#define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK	BIT(26)
#define CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR	BIT(27)
#define CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR	BIT(28)
#define CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR	BIT(29)
#define CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR	BIT(30)
#define CCI_IRQ_STATUS_0_I2C_M0_ERROR		0x18000ee6
#define CCI_IRQ_STATUS_0_I2C_M1_ERROR		0x60ee6000

/* Timeout used for reset, halt and queue-completion waits */
#define CCI_TIMEOUT	(msecs_to_jiffies(100))
#define NUM_MASTERS	2
#define NUM_QUEUES	2

/* Max number of resources + 1 for a NULL terminator */
#define CCI_RES_MAX	6

/* Command opcodes written into a queue's LOAD_DATA register */
#define CCI_I2C_SET_PARAM	1
#define CCI_I2C_REPORT		8
#define CCI_I2C_WRITE		9
#define CCI_I2C_READ		10

/* REPORT command flag: raise a QUEUE_REPORT interrupt on execution */
#define CCI_I2C_REPORT_IRQ_EN	BIT(8)
/* I2C bus speed modes; index into cci_data::params */
enum {
	I2C_MODE_STANDARD,
	I2C_MODE_FAST,
	I2C_MODE_FAST_PLUS,
};

/* The two hardware command queues available per master */
enum cci_i2c_queue_t {
	QUEUE_0,
	QUEUE_1
};
94 
/*
 * Per-speed-mode I2C timing parameters, programmed into the
 * CCI_I2C_Mm_*_CTL registers by cci_init(). Values are in CCI
 * clock ticks.
 */
struct hw_params {
	u16 thigh; /* HIGH period of the SCL clock in clock ticks */
	u16 tlow; /* LOW period of the SCL clock */
	u16 tsu_sto; /* set-up time for STOP condition */
	u16 tsu_sta; /* set-up time for a repeated START condition */
	u16 thd_dat; /* data hold time */
	u16 thd_sta; /* hold time (repeated) START condition */
	u16 tbuf; /* bus free time between a STOP and START condition */
	u8 scl_stretch_en; /* enable SCL clock stretching */
	u16 trdhld;
	u16 tsp; /* pulse width of spikes suppressed by the input filter */
};
107 
struct cci;

/*
 * State for one I2C master exposed by the controller.
 * @cci is NULL for masters not described in DT (used as a
 * "this master is active" flag throughout the driver).
 */
struct cci_master {
	struct i2c_adapter adap;	/* registered I2C adapter */
	u16 master;			/* master index (0 or 1) */
	u8 mode;			/* I2C_MODE_* speed mode */
	int status;			/* result of last queue run, set by ISR */
	struct completion irq_complete;	/* signalled by cci_isr() */
	struct cci *cci;		/* back-pointer to the controller */
};
118 
/* Per-hardware-version configuration, selected via OF match data */
struct cci_data {
	unsigned int num_masters;		/* masters this version provides */
	struct i2c_adapter_quirks quirks;	/* transfer length limits */
	u16 queue_size[NUM_QUEUES];		/* command queue depths in words */
	unsigned long cci_clk_rate;		/* expected "cci" clock rate in Hz */
	struct hw_params params[3];		/* timings indexed by I2C_MODE_* */
};
126 
/* Driver instance state for one CCI controller */
struct cci {
	struct device *dev;
	void __iomem *base;			/* mapped register space */
	unsigned int irq;
	const struct cci_data *data;		/* version-specific config */
	struct clk_bulk_data *clocks;
	int nclocks;
	struct cci_master master[NUM_MASTERS];
};
136 
/*
 * Interrupt handler: acknowledge all pending status bits, then complete
 * the waiters (reset/halt/queue) and record per-master error status.
 * On a halt ack the affected master is reset; on an error the affected
 * master is halted (which later triggers the halt-ack path).
 */
static irqreturn_t cci_isr(int irq, void *dev)
{
	struct cci *cci = dev;
	u32 val, reset = 0;
	int ret = IRQ_NONE;

	/* Read, clear and globally ack all pending interrupts at once */
	val = readl(cci->base + CCI_IRQ_STATUS_0);
	writel(val, cci->base + CCI_IRQ_CLEAR_0);
	writel(0x1, cci->base + CCI_IRQ_GLOBAL_CLEAR_CMD);

	if (val & CCI_IRQ_STATUS_0_RST_DONE_ACK) {
		/* Reset is controller-wide: wake master 0, and master 1 if used */
		complete(&cci->master[0].irq_complete);
		if (cci->master[1].master)
			complete(&cci->master[1].irq_complete);
		ret = IRQ_HANDLED;
	}

	/* Master 0 finished a read or a queue reported completion */
	if (val & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE ||
			val & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT ||
			val & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT) {
		cci->master[0].status = 0;
		complete(&cci->master[0].irq_complete);
		ret = IRQ_HANDLED;
	}

	/* Master 1 finished a read or a queue reported completion */
	if (val & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE ||
			val & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT ||
			val & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT) {
		cci->master[1].status = 0;
		complete(&cci->master[1].irq_complete);
		ret = IRQ_HANDLED;
	}

	/* Halt acknowledged: schedule a reset of the halted master */
	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK)) {
		reset = CCI_RESET_CMD_M0_MASK;
		ret = IRQ_HANDLED;
	}

	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK)) {
		reset = CCI_RESET_CMD_M1_MASK;
		ret = IRQ_HANDLED;
	}

	if (unlikely(reset))
		writel(reset, cci->base + CCI_RESET_CMD);

	/* Error on master 0: record NACK (-ENXIO) vs other (-EIO), then halt it */
	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M0_ERROR)) {
		if (val & CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR ||
			val & CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR)
			cci->master[0].status = -ENXIO;
		else
			cci->master[0].status = -EIO;

		writel(CCI_HALT_REQ_I2C_M0_Q0Q1, cci->base + CCI_HALT_REQ);
		ret = IRQ_HANDLED;
	}

	/* Error on master 1: same handling as master 0 */
	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_ERROR)) {
		if (val & CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR ||
			val & CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR)
			cci->master[1].status = -ENXIO;
		else
			cci->master[1].status = -EIO;

		writel(CCI_HALT_REQ_I2C_M1_Q0Q1, cci->base + CCI_HALT_REQ);
		ret = IRQ_HANDLED;
	}

	return ret;
}
207 
/*
 * Request a halt of both queues of the given master and wait for the
 * halt-ack interrupt. Returns 0 on success, -EINVAL for an invalid
 * master index, -ETIMEDOUT if no ack arrives within CCI_TIMEOUT.
 */
static int cci_halt(struct cci *cci, u8 master_num)
{
	struct cci_master *master;
	u32 val;

	if (master_num >= cci->data->num_masters) {
		dev_err(cci->dev, "Unsupported master idx (%u)\n", master_num);
		return -EINVAL;
	}

	/* BIT(0)/BIT(1) match CCI_HALT_REQ_I2C_M0/M1_Q0Q1 */
	val = BIT(master_num);
	master = &cci->master[master_num];

	reinit_completion(&master->irq_complete);
	writel(val, cci->base + CCI_HALT_REQ);

	if (!wait_for_completion_timeout(&master->irq_complete, CCI_TIMEOUT)) {
		dev_err(cci->dev, "CCI halt timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
231 
/*
 * Reset the whole controller and wait for the RST_DONE_ACK interrupt.
 * Returns 0 on success or -ETIMEDOUT.
 */
static int cci_reset(struct cci *cci)
{
	/*
	 * We reset the whole controller here and, for simplicity, use
	 * master[0].irq_complete for waiting on it (the ISR completes
	 * both masters on reset ack).
	 */
	reinit_completion(&cci->master[0].irq_complete);
	writel(CCI_RESET_CMD_MASK, cci->base + CCI_RESET_CMD);

	if (!wait_for_completion_timeout(&cci->master[0].irq_complete,
					 CCI_TIMEOUT)) {
		dev_err(cci->dev, "CCI reset timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
249 
/*
 * Unmask all interrupts the driver handles and program the per-master
 * I2C timing registers from the hw_params table for each active
 * master's configured speed mode. Always returns 0.
 */
static int cci_init(struct cci *cci)
{
	u32 val = CCI_IRQ_MASK_0_I2C_M0_RD_DONE |
			CCI_IRQ_MASK_0_I2C_M0_Q0_REPORT |
			CCI_IRQ_MASK_0_I2C_M0_Q1_REPORT |
			CCI_IRQ_MASK_0_I2C_M1_RD_DONE |
			CCI_IRQ_MASK_0_I2C_M1_Q0_REPORT |
			CCI_IRQ_MASK_0_I2C_M1_Q1_REPORT |
			CCI_IRQ_MASK_0_RST_DONE_ACK |
			CCI_IRQ_MASK_0_I2C_M0_Q0Q1_HALT_ACK |
			CCI_IRQ_MASK_0_I2C_M1_Q0Q1_HALT_ACK |
			CCI_IRQ_MASK_0_I2C_M0_ERROR |
			CCI_IRQ_MASK_0_I2C_M1_ERROR;
	int i;

	writel(val, cci->base + CCI_IRQ_MASK_0);

	for (i = 0; i < cci->data->num_masters; i++) {
		int mode = cci->master[i].mode;
		const struct hw_params *hw;

		/* Skip masters not described in DT */
		if (!cci->master[i].cci)
			continue;

		hw = &cci->data->params[mode];

		val = hw->thigh << 16 | hw->tlow;
		writel(val, cci->base + CCI_I2C_Mm_SCL_CTL(i));

		val = hw->tsu_sto << 16 | hw->tsu_sta;
		writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_0(i));

		val = hw->thd_dat << 16 | hw->thd_sta;
		writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_1(i));

		val = hw->tbuf;
		writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_2(i));

		val = hw->scl_stretch_en << 8 | hw->trdhld << 4 | hw->tsp;
		writel(val, cci->base + CCI_I2C_Mm_MISC_CTL(i));
	}

	return 0;
}
294 
/*
 * Execute the commands currently loaded in a master's queue and wait
 * for completion. On timeout the controller is reset and reinitialized.
 * Returns the master's status as set by the ISR (0, -ENXIO or -EIO),
 * or -ETIMEDOUT.
 */
static int cci_run_queue(struct cci *cci, u8 master, u8 queue)
{
	u32 val;

	/* Execute exactly the number of words currently loaded */
	val = readl(cci->base + CCI_I2C_Mm_Qn_CUR_WORD_CNT(master, queue));
	writel(val, cci->base + CCI_I2C_Mm_Qn_EXEC_WORD_CNT(master, queue));

	reinit_completion(&cci->master[master].irq_complete);
	/* QUEUE_START bit layout: one bit per (master, queue) pair */
	val = BIT(master * 2 + queue);
	writel(val, cci->base + CCI_QUEUE_START);

	if (!wait_for_completion_timeout(&cci->master[master].irq_complete,
					 CCI_TIMEOUT)) {
		dev_err(cci->dev, "master %d queue %d timeout\n",
			master, queue);
		cci_reset(cci);
		cci_init(cci);
		return -ETIMEDOUT;
	}

	return cci->master[master].status;
}
317 
/*
 * Ensure a queue is empty before loading a new transfer. A full queue
 * is an error; a non-empty queue is drained by appending a REPORT
 * command and running it. Returns 0 when the queue is ready.
 */
static int cci_validate_queue(struct cci *cci, u8 master, u8 queue)
{
	u32 val;

	val = readl(cci->base + CCI_I2C_Mm_Qn_CUR_WORD_CNT(master, queue));
	if (val == cci->data->queue_size[queue])
		return -EINVAL;

	/* Already empty - nothing to do */
	if (!val)
		return 0;

	val = CCI_I2C_REPORT | CCI_I2C_REPORT_IRQ_EN;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	return cci_run_queue(cci, master, queue);
}
334 
/*
 * Perform an I2C read of @len bytes from 7-bit address @addr on the
 * given master, using QUEUE_1. The hardware returns len + 1 bytes in
 * its read FIFO (the first byte is discarded, see below), i.e.
 * len / 4 + 1 32-bit words. Returns 0 on success or a negative errno.
 */
static int cci_i2c_read(struct cci *cci, u16 master,
			u16 addr, u8 *buf, u16 len)
{
	u32 val, words_read, words_exp;
	u8 queue = QUEUE_1;
	int i, index = 0, ret;
	bool first = true;

	/*
	 * Call validate queue to make sure queue is empty before starting.
	 * This is to avoid overflow / underflow of queue.
	 */
	ret = cci_validate_queue(cci, master, queue);
	if (ret < 0)
		return ret;

	/* Load slave address, then the READ command with the byte count */
	val = CCI_I2C_SET_PARAM | (addr & 0x7f) << 4;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	val = CCI_I2C_READ | len << 4;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	ret = cci_run_queue(cci, master, queue);
	if (ret < 0)
		return ret;

	/* len + 1 bytes rounded up to words: (len + 1 + 3) / 4 = len / 4 + 1 */
	words_read = readl(cci->base + CCI_I2C_Mm_READ_BUF_LEVEL(master));
	words_exp = len / 4 + 1;
	if (words_read != words_exp) {
		/* Both values are u32, so use the unsigned format specifier */
		dev_err(cci->dev, "words read = %u, words expected = %u\n",
			words_read, words_exp);
		return -EIO;
	}

	do {
		val = readl(cci->base + CCI_I2C_Mm_READ_DATA(master));

		for (i = 0; i < 4 && index < len; i++) {
			if (first) {
				/* The LS byte of this register represents the
				 * first byte read from the slave during a read
				 * access.
				 */
				first = false;
				continue;
			}
			buf[index++] = (val >> (i * 8)) & 0xff;
		}
	} while (--words_read);

	return 0;
}
387 
/*
 * Perform an I2C write of @len bytes to 7-bit address @addr on the
 * given master, using QUEUE_0. The WRITE command byte and payload are
 * packed little-endian into 32-bit queue words; a trailing REPORT
 * command raises the completion interrupt. @len is bounded by the
 * adapter's max_write_len quirk so load[12] cannot overflow.
 * Returns 0 on success or a negative errno.
 */
static int cci_i2c_write(struct cci *cci, u16 master,
			 u16 addr, u8 *buf, u16 len)
{
	u8 queue = QUEUE_0;
	u8 load[12] = { 0 };
	int i = 0, j, ret;
	u32 val;

	/*
	 * Call validate queue to make sure queue is empty before starting.
	 * This is to avoid overflow / underflow of queue.
	 */
	ret = cci_validate_queue(cci, master, queue);
	if (ret < 0)
		return ret;

	val = CCI_I2C_SET_PARAM | (addr & 0x7f) << 4;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	load[i++] = CCI_I2C_WRITE | len << 4;

	for (j = 0; j < len; j++)
		load[i++] = buf[j];

	/* Pack command + payload bytes into little-endian 32-bit words */
	for (j = 0; j < i; j += 4) {
		val = load[j];
		val |= load[j + 1] << 8;
		val |= load[j + 2] << 16;
		val |= load[j + 3] << 24;
		writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));
	}

	val = CCI_I2C_REPORT | CCI_I2C_REPORT_IRQ_EN;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	return cci_run_queue(cci, master, queue);
}
425 
/*
 * i2c_algorithm::master_xfer implementation: execute @num messages
 * sequentially on the master behind @adap. Returns the number of
 * messages transferred on success or a negative errno.
 */
static int cci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	struct cci_master *cci_master = i2c_get_adapdata(adap);
	struct cci *cci = cci_master->cci;
	int i, ret;

	ret = pm_runtime_get_sync(cci->dev);
	if (ret < 0)
		goto err;	/* still need the put to balance the get */

	for (i = 0; i < num; i++) {
		if (msgs[i].flags & I2C_M_RD)
			ret = cci_i2c_read(cci, cci_master->master,
					   msgs[i].addr, msgs[i].buf,
					   msgs[i].len);
		else
			ret = cci_i2c_write(cci, cci_master->master,
					    msgs[i].addr, msgs[i].buf,
					    msgs[i].len);

		/* Stop at the first failing message */
		if (ret < 0)
			break;
	}

	if (!ret)
		ret = num;

err:
	pm_runtime_mark_last_busy(cci->dev);
	pm_runtime_put_autosuspend(cci->dev);

	return ret;
}
459 
cci_func(struct i2c_adapter * adap)460 static u32 cci_func(struct i2c_adapter *adap)
461 {
462 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
463 }
464 
/* Algorithm hooks shared by all masters of the controller */
static const struct i2c_algorithm cci_algo = {
	.master_xfer	= cci_xfer,
	.functionality	= cci_func,
};
469 
/* Prepare and enable all bus clocks acquired at probe time. */
static int cci_enable_clocks(struct cci *cci)
{
	return clk_bulk_prepare_enable(cci->nclocks, cci->clocks);
}
474 
/* Disable and unprepare all bus clocks; counterpart of cci_enable_clocks(). */
static void cci_disable_clocks(struct cci *cci)
{
	clk_bulk_disable_unprepare(cci->nclocks, cci->clocks);
}
479 
/* Runtime-PM suspend: just gate the clocks. */
static int __maybe_unused cci_suspend_runtime(struct device *dev)
{
	struct cci *cci = dev_get_drvdata(dev);

	cci_disable_clocks(cci);
	return 0;
}
487 
/*
 * Runtime-PM resume: re-enable clocks and reprogram the controller
 * (IRQ mask and timing registers are lost while clocks are off).
 */
static int __maybe_unused cci_resume_runtime(struct device *dev)
{
	struct cci *cci = dev_get_drvdata(dev);
	int ret;

	ret = cci_enable_clocks(cci);
	if (ret)
		return ret;

	cci_init(cci);
	return 0;
}
500 
/* System suspend: only act if the device is not already runtime-suspended. */
static int __maybe_unused cci_suspend(struct device *dev)
{
	if (!pm_runtime_suspended(dev))
		return cci_suspend_runtime(dev);

	return 0;
}
508 
cci_resume(struct device * dev)509 static int __maybe_unused cci_resume(struct device *dev)
510 {
511 	cci_resume_runtime(dev);
512 	pm_runtime_mark_last_busy(dev);
513 	pm_request_autosuspend(dev);
514 
515 	return 0;
516 }
517 
/* System sleep and runtime PM callbacks */
static const struct dev_pm_ops qcom_cci_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(cci_suspend, cci_resume)
	SET_RUNTIME_PM_OPS(cci_suspend_runtime, cci_resume_runtime, NULL)
};
522 
/*
 * Probe: parse the DT children describing each master, map registers,
 * acquire clocks, request the IRQ, reset and initialize the hardware,
 * enable runtime PM and register one I2C adapter per active master.
 */
static int cci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	unsigned long cci_clk_rate = 0;
	struct device_node *child;
	struct resource *r;
	struct cci *cci;
	int ret, i;
	u32 val;

	cci = devm_kzalloc(dev, sizeof(*cci), GFP_KERNEL);
	if (!cci)
		return -ENOMEM;

	cci->dev = dev;
	platform_set_drvdata(pdev, cci);
	cci->data = device_get_match_data(dev);
	if (!cci->data)
		return -ENOENT;

	/* One DT child node per master; "reg" is the master index */
	for_each_available_child_of_node(dev->of_node, child) {
		struct cci_master *master;
		u32 idx;

		ret = of_property_read_u32(child, "reg", &idx);
		if (ret) {
			dev_err(dev, "%pOF invalid 'reg' property", child);
			continue;
		}

		if (idx >= cci->data->num_masters) {
			dev_err(dev, "%pOF invalid 'reg' value: %u (max is %u)",
				child, idx, cci->data->num_masters - 1);
			continue;
		}

		master = &cci->master[idx];
		master->adap.quirks = &cci->data->quirks;
		master->adap.algo = &cci_algo;
		master->adap.dev.parent = dev;
		/* Reference released when the adapter is removed */
		master->adap.dev.of_node = of_node_get(child);
		master->master = idx;
		master->cci = cci;	/* marks this master as active */

		i2c_set_adapdata(&master->adap, master);
		snprintf(master->adap.name, sizeof(master->adap.name), "Qualcomm-CCI");

		/* Default to standard mode unless DT requests fast/fast+ */
		master->mode = I2C_MODE_STANDARD;
		ret = of_property_read_u32(child, "clock-frequency", &val);
		if (!ret) {
			if (val == I2C_MAX_FAST_MODE_FREQ)
				master->mode = I2C_MODE_FAST;
			else if (val == I2C_MAX_FAST_MODE_PLUS_FREQ)
				master->mode = I2C_MODE_FAST_PLUS;
		}

		init_completion(&master->irq_complete);
	}

	/* Memory */

	cci->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
	if (IS_ERR(cci->base))
		return PTR_ERR(cci->base);

	/* Clocks */

	ret = devm_clk_bulk_get_all(dev, &cci->clocks);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to get clocks\n");
	else if (!ret)
		return dev_err_probe(dev, -EINVAL, "not enough clocks in DT\n");
	cci->nclocks = ret;

	/* Retrieve CCI clock rate */
	for (i = 0; i < cci->nclocks; i++) {
		if (!strcmp(cci->clocks[i].id, "cci")) {
			cci_clk_rate = clk_get_rate(cci->clocks[i].clk);
			break;
		}
	}

	if (cci_clk_rate != cci->data->cci_clk_rate) {
		/* cci clock set by the bootloader or via assigned clock rate
		 * in DT. Warn only; timing parameters assume the expected rate.
		 */
		dev_warn(dev, "Found %lu cci clk rate while %lu was expected\n",
			 cci_clk_rate, cci->data->cci_clk_rate);
	}

	ret = cci_enable_clocks(cci);
	if (ret < 0)
		return ret;

	/* Interrupt */

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto disable_clocks;
	cci->irq = ret;

	ret = devm_request_irq(dev, cci->irq, cci_isr, 0, dev_name(dev), cci);
	if (ret < 0) {
		dev_err(dev, "request_irq failed, ret: %d\n", ret);
		goto disable_clocks;
	}

	val = readl(cci->base + CCI_HW_VERSION);
	dev_dbg(dev, "CCI HW version = 0x%08x", val);

	ret = cci_reset(cci);
	if (ret < 0)
		goto error;

	ret = cci_init(cci);
	if (ret < 0)
		goto error;

	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	for (i = 0; i < cci->data->num_masters; i++) {
		if (!cci->master[i].cci)
			continue;

		ret = i2c_add_adapter(&cci->master[i].adap);
		if (ret < 0) {
			/* Drop the of_node_get() reference taken above */
			of_node_put(cci->master[i].adap.dev.of_node);
			goto error_i2c;
		}
	}

	return 0;

error_i2c:
	pm_runtime_disable(dev);
	pm_runtime_dont_use_autosuspend(dev);

	/* Unwind the adapters registered before the failure */
	for (--i ; i >= 0; i--) {
		if (cci->master[i].cci) {
			i2c_del_adapter(&cci->master[i].adap);
			of_node_put(cci->master[i].adap.dev.of_node);
		}
	}
error:
	disable_irq(cci->irq);
disable_clocks:
	cci_disable_clocks(cci);

	return ret;
}
676 
/*
 * Remove: delete all registered adapters, halt each master, quiesce
 * the IRQ and tear down runtime PM.
 */
static void cci_remove(struct platform_device *pdev)
{
	struct cci *cci = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < cci->data->num_masters; i++) {
		if (cci->master[i].cci) {
			i2c_del_adapter(&cci->master[i].adap);
			of_node_put(cci->master[i].adap.dev.of_node);
		}
		cci_halt(cci, i);
	}

	disable_irq(cci->irq);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
}
694 
/* CCI v1 (e.g. MSM8226): single master, 19.2 MHz CCI clock */
static const struct cci_data cci_v1_data = {
	.num_masters = 1,
	.queue_size = { 64, 16 },
	.quirks = {
		.max_write_len = 10,
		.max_read_len = 12,
	},
	.cci_clk_rate =  19200000,
	.params[I2C_MODE_STANDARD] = {
		.thigh = 78,
		.tlow = 114,
		.tsu_sto = 28,
		.tsu_sta = 28,
		.thd_dat = 10,
		.thd_sta = 77,
		.tbuf = 118,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 1
	},
	.params[I2C_MODE_FAST] = {
		.thigh = 20,
		.tlow = 28,
		.tsu_sto = 21,
		.tsu_sta = 21,
		.thd_dat = 13,
		.thd_sta = 18,
		.tbuf = 32,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
};
728 
/* CCI v1.5 (e.g. MSM8974): as v1 but with two masters */
static const struct cci_data cci_v1_5_data = {
	.num_masters = 2,
	.queue_size = { 64, 16 },
	.quirks = {
		.max_write_len = 10,
		.max_read_len = 12,
	},
	.cci_clk_rate =  19200000,
	.params[I2C_MODE_STANDARD] = {
		.thigh = 78,
		.tlow = 114,
		.tsu_sto = 28,
		.tsu_sta = 28,
		.thd_dat = 10,
		.thd_sta = 77,
		.tbuf = 118,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 1
	},
	.params[I2C_MODE_FAST] = {
		.thigh = 20,
		.tlow = 28,
		.tsu_sto = 21,
		.tsu_sta = 21,
		.thd_dat = 13,
		.thd_sta = 18,
		.tbuf = 32,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
};
762 
/* CCI v2 (e.g. MSM8996): two masters, 37.5 MHz CCI clock, fast+ support */
static const struct cci_data cci_v2_data = {
	.num_masters = 2,
	.queue_size = { 64, 16 },
	.quirks = {
		.max_write_len = 11,
		.max_read_len = 12,
	},
	.cci_clk_rate =  37500000,
	.params[I2C_MODE_STANDARD] = {
		.thigh = 201,
		.tlow = 174,
		.tsu_sto = 204,
		.tsu_sta = 231,
		.thd_dat = 22,
		.thd_sta = 162,
		.tbuf = 227,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
	.params[I2C_MODE_FAST] = {
		.thigh = 38,
		.tlow = 56,
		.tsu_sto = 40,
		.tsu_sta = 40,
		.thd_dat = 22,
		.thd_sta = 35,
		.tbuf = 62,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
	.params[I2C_MODE_FAST_PLUS] = {
		.thigh = 16,
		.tlow = 22,
		.tsu_sto = 17,
		.tsu_sta = 18,
		.thd_dat = 16,
		.thd_sta = 15,
		.tbuf = 24,
		.scl_stretch_en = 0,
		.trdhld = 3,
		.tsp = 3
	},
};
808 
/* OF match table mapping SoC compatibles to their cci_data config */
static const struct of_device_id cci_dt_match[] = {
	{ .compatible = "qcom,msm8226-cci", .data = &cci_v1_data},
	{ .compatible = "qcom,msm8974-cci", .data = &cci_v1_5_data},
	{ .compatible = "qcom,msm8996-cci", .data = &cci_v2_data},


	/*
	 * Legacy compatibles kept for backwards compatibility.
	 * Do not add any new ones unless they introduce a new config
	 */
	{ .compatible = "qcom,msm8916-cci", .data = &cci_v1_data},
	{ .compatible = "qcom,sdm845-cci", .data = &cci_v2_data},
	{ .compatible = "qcom,sm8250-cci", .data = &cci_v2_data},
	{ .compatible = "qcom,sm8450-cci", .data = &cci_v2_data},
	{}
};
MODULE_DEVICE_TABLE(of, cci_dt_match);
826 
/* Platform driver glue and module registration */
static struct platform_driver qcom_cci_driver = {
	.probe  = cci_probe,
	.remove_new = cci_remove,
	.driver = {
		.name = "i2c-qcom-cci",
		.of_match_table = cci_dt_match,
		.pm = &qcom_cci_pm,
	},
};

module_platform_driver(qcom_cci_driver);
838 
839 MODULE_DESCRIPTION("Qualcomm Camera Control Interface driver");
840 MODULE_AUTHOR("Todor Tomov <todor.tomov@linaro.org>");
841 MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
842 MODULE_LICENSE("GPL v2");
843