// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synopsys DesignWare I2C adapter driver (master only).
 *
 * Based on the TI DAVINCI I2C adapter driver.
 *
 * Copyright (C) 2006 Texas Instruments.
 * Copyright (C) 2007 MontaVista Software Inc.
 * Copyright (C) 2009 Provigent Ltd.
 */

#define DEFAULT_SYMBOL_NAMESPACE	"I2C_DW"

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include "i2c-designware-core.h"

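/* Constants used by the transfer quirk for the AMD Navi GPU I2C controller below */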
#define AMD_TIMEOUT_MIN_US	25
#define AMD_TIMEOUT_MAX_US	250
#define AMD_MASTERCFG_MASK	GENMASK(15, 0)

static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
{
	/* Configure Tx/Rx FIFO threshold levels */
	regmap_write(dev->map, DW_IC_TX_TL, dev->tx_fifo_depth / 2);
	regmap_write(dev->map, DW_IC_RX_TL, 0);

	/* Configure the I2C master */
	regmap_write(dev->map, DW_IC_CON, dev->master_cfg);
}

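/*
 * Compute the SCL high/low counts for all supported speed modes, unless they
 * were already provided, and set up the SDA hold time.
 */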
static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{
	unsigned int comp_param1;
	u32 sda_falling_time, scl_falling_time;
	struct i2c_timings *t = &dev->timings;
	const char *fp_str = "";
	u32 ic_clk;
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	ret = regmap_read(dev->map, DW_IC_COMP_PARAM_1, &comp_param1);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	/* Set standard and fast speed dividers for high/low periods */
	sda_falling_time = t->sda_fall_ns ?: 300; /* ns */
	scl_falling_time = t->scl_fall_ns ?: 300; /* ns */

	/* Calculate SCL timing parameters for standard mode if not set */
	if (!dev->ss_hcnt || !dev->ss_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->ss_hcnt =
			i2c_dw_scl_hcnt(dev,
					DW_IC_SS_SCL_HCNT,
					ic_clk,
					4000,	/* tHD;STA = tHIGH = 4.0 us */
					sda_falling_time,
					0,	/* 0: DW default, 1: Ideal */
					0);	/* No offset */
		dev->ss_lcnt =
			i2c_dw_scl_lcnt(dev,
					DW_IC_SS_SCL_LCNT,
					ic_clk,
					4700,	/* tLOW = 4.7 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n",
		dev->ss_hcnt, dev->ss_lcnt);

	/*
	 * Set the SCL timing parameters for fast mode or fast mode plus. The
	 * only difference is the timing parameter values, since the registers
	 * are the same.
	 */
	if (t->bus_freq_hz == I2C_MAX_FAST_MODE_PLUS_FREQ) {
		/*
		 * Check whether Fast Mode Plus parameters are available and
		 * calculate the SCL timing parameters for Fast Mode Plus if
		 * they are not set.
		 */
		if (dev->fp_hcnt && dev->fp_lcnt) {
			dev->fs_hcnt = dev->fp_hcnt;
			dev->fs_lcnt = dev->fp_lcnt;
		} else {
			ic_clk = i2c_dw_clk_rate(dev);
			dev->fs_hcnt =
				i2c_dw_scl_hcnt(dev,
						DW_IC_FS_SCL_HCNT,
						ic_clk,
						260,	/* tHIGH = 260 ns */
						sda_falling_time,
						0,	/* DW default */
						0);	/* No offset */
			dev->fs_lcnt =
				i2c_dw_scl_lcnt(dev,
						DW_IC_FS_SCL_LCNT,
						ic_clk,
						500,	/* tLOW = 500 ns */
						scl_falling_time,
						0);	/* No offset */
		}
		fp_str = " Plus";
	}
	/*
	 * Calculate the SCL timing parameters for fast mode if not set. They
	 * are also needed in high speed mode.
	 */
	if (!dev->fs_hcnt || !dev->fs_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->fs_hcnt =
			i2c_dw_scl_hcnt(dev,
					DW_IC_FS_SCL_HCNT,
					ic_clk,
					600,	/* tHD;STA = tHIGH = 0.6 us */
					sda_falling_time,
					0,	/* 0: DW default, 1: Ideal */
					0);	/* No offset */
		dev->fs_lcnt =
			i2c_dw_scl_lcnt(dev,
					DW_IC_FS_SCL_LCNT,
					ic_clk,
					1300,	/* tLOW = 1.3 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n",
		fp_str, dev->fs_hcnt, dev->fs_lcnt);

	/* Check whether high speed is possible and fall back to fast mode if not */
	if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) ==
		DW_IC_CON_SPEED_HIGH) {
		if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
			!= DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
			dev_err(dev->dev, "High Speed not supported!\n");
			t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
			dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
			dev->master_cfg |= DW_IC_CON_SPEED_FAST;
			dev->hs_hcnt = 0;
			dev->hs_lcnt = 0;
		} else if (!dev->hs_hcnt || !dev->hs_lcnt) {
			ic_clk = i2c_dw_clk_rate(dev);
			dev->hs_hcnt =
				i2c_dw_scl_hcnt(dev,
						DW_IC_HS_SCL_HCNT,
						ic_clk,
						160,	/* tHIGH = 160 ns */
						sda_falling_time,
						0,	/* DW default */
						0);	/* No offset */
			dev->hs_lcnt =
				i2c_dw_scl_lcnt(dev,
						DW_IC_HS_SCL_LCNT,
						ic_clk,
						320,	/* tLOW = 320 ns */
						scl_falling_time,
						0);	/* No offset */
		}
		dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n",
			dev->hs_hcnt, dev->hs_lcnt);
	}

	ret = i2c_dw_set_sda_hold(dev);
	if (ret)
		return ret;

	dev_dbg(dev->dev, "Bus speed: %s\n", i2c_freq_mode_string(t->bus_freq_hz));
	return 0;
}

/**
 * i2c_dw_init_master() - Initialize the designware I2C master hardware
 * @dev: device private data
 *
 * This function configures and enables the I2C master.
 * It is called during I2C initialization and in case of a timeout at
 * run time.
 */
static int i2c_dw_init_master(struct dw_i2c_dev *dev)
{
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	/* Write standard speed timing parameters */
	regmap_write(dev->map, DW_IC_SS_SCL_HCNT, dev->ss_hcnt);
	regmap_write(dev->map, DW_IC_SS_SCL_LCNT, dev->ss_lcnt);

	/* Write fast mode/fast mode plus timing parameters */
	regmap_write(dev->map, DW_IC_FS_SCL_HCNT, dev->fs_hcnt);
	regmap_write(dev->map, DW_IC_FS_SCL_LCNT, dev->fs_lcnt);

	/* Write high speed timing parameters if supported */
	if (dev->hs_hcnt && dev->hs_lcnt) {
		regmap_write(dev->map, DW_IC_HS_SCL_HCNT, dev->hs_hcnt);
		regmap_write(dev->map, DW_IC_HS_SCL_LCNT, dev->hs_lcnt);
	}

	/* Write SDA hold time if supported */
	if (dev->sda_hold_time)
		regmap_write(dev->map, DW_IC_SDA_HOLD, dev->sda_hold_time);

	i2c_dw_configure_fifo_master(dev);
	i2c_dw_release_lock(dev);

	return 0;
}

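/*
 * Prepare the controller for a transfer: program the addressing mode and
 * target address, then enable the adapter and unmask the master interrupts.
 */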
static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 ic_con = 0, ic_tar = 0;
	unsigned int dummy;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	/* If the slave address is a ten-bit address, enable 10BITADDR */
	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
		ic_con = DW_IC_CON_10BITADDR_MASTER;
		/*
		 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
		 * mode has to be enabled via bit 12 of the IC_TAR register.
		 * We always set it since I2C_DYNAMIC_TAR_UPDATE can't be
		 * detected from the registers.
		 */
		ic_tar = DW_IC_TAR_10BITADDR_MASTER;
	}

	regmap_update_bits(dev->map, DW_IC_CON, DW_IC_CON_10BITADDR_MASTER,
			   ic_con);

	/*
	 * Set the slave (target) address and enable 10-bit addressing mode
	 * if applicable.
	 */
	regmap_write(dev->map, DW_IC_TAR,
		     msgs[dev->msg_write_idx].addr | ic_tar);

	/* Enforce disabled interrupts (due to HW issues) */
	__i2c_dw_write_intr_mask(dev, 0);

	/* Enable the adapter */
	__i2c_dw_enable(dev);

	/* Dummy read to avoid the register getting stuck on Bay Trail */
	regmap_read(dev->map, DW_IC_ENABLE_STATUS, &dummy);

	/* Clear and enable interrupts */
	regmap_read(dev->map, DW_IC_CLR_INTR, &dummy);
	__i2c_dw_write_intr_mask(dev, DW_IC_INTR_MASTER_MASK);
}

/*
 * This function waits for the controller to be idle before disabling I2C.
 * When the controller is not in the IDLE state, the MST_ACTIVITY bit
 * (IC_STATUS[5]) is set.
 *
 * Values:
 * 0x1 (ACTIVE): Controller not idle
 * 0x0 (IDLE): Controller is idle
 *
 * The function is called after completing the current transfer.
 *
 * Returns:
 * False when the controller is in the IDLE state.
 * True when the controller is in the ACTIVE state.
 */
static bool i2c_dw_is_controller_active(struct dw_i2c_dev *dev)
{
	u32 status;

	regmap_read(dev->map, DW_IC_STATUS, &status);
	if (!(status & DW_IC_STATUS_MASTER_ACTIVITY))
		return false;

	return regmap_read_poll_timeout(dev->map, DW_IC_STATUS, status,
				       !(status & DW_IC_STATUS_MASTER_ACTIVITY),
				       1100, 20000) != 0;
}

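/* Poll IC_INTR_STAT until the STOP_DET bit clears, or report a timeout */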
static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev)
{
	u32 val;
	int ret;

	ret = regmap_read_poll_timeout(dev->map, DW_IC_INTR_STAT, val,
				       !(val & DW_IC_INTR_STOP_DET),
				       1100, 20000);
	if (ret)
		dev_err(dev->dev, "i2c timeout error %d\n", ret);

	return ret;
}

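/* Wait until the bus is no longer busy and the STOP_DET flag has cleared */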
static int i2c_dw_status(struct dw_i2c_dev *dev)
{
	int status;

	status = i2c_dw_wait_bus_not_busy(dev);
	if (status)
		return status;

	return i2c_dw_check_stopbit(dev);
}

/*
 * Initiate and continue a master read/write transaction using a polling
 * based transfer routine, writing the messages into the Tx buffer.
 */
static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, int num_msgs)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int msg_wrt_idx, msg_itr_lmt, buf_len, data_idx;
	int cmd = 0, status;
	u8 *tx_buf;
	unsigned int val;

	/*
	 * In order to enable the interrupt for UCSI, i.e. the AMD NAVI GPU
	 * card, it is mandatory to set the right value in a specific register
	 * (offset 0x474) as per the hardware IP specification.
	 */
	regmap_write(dev->map, AMD_UCSI_INTR_REG, AMD_UCSI_INTR_EN);

	dev->msgs = msgs;
	dev->msgs_num = num_msgs;
	dev->msg_write_idx = 0;
	i2c_dw_xfer_init(dev);

	/* Initiate the read/write transaction for each message */
	for (msg_wrt_idx = 0; msg_wrt_idx < num_msgs; msg_wrt_idx++) {
		tx_buf = msgs[msg_wrt_idx].buf;
		buf_len = msgs[msg_wrt_idx].len;

		if (!(msgs[msg_wrt_idx].flags & I2C_M_RD))
			regmap_write(dev->map, DW_IC_TX_TL, buf_len - 1);
		/*
		 * Initiate the I2C read/write transaction of buffer length,
		 * and poll for the bus busy status. For the last message
		 * transfer, update the command with the stop bit enabled.
		 */
		for (msg_itr_lmt = buf_len; msg_itr_lmt > 0; msg_itr_lmt--) {
			if (msg_wrt_idx == num_msgs - 1 && msg_itr_lmt == 1)
				cmd |= BIT(9);

			if (msgs[msg_wrt_idx].flags & I2C_M_RD) {
				/* Due to a hardware bug, the same command needs to be written twice. */
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100);
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | cmd);
				if (cmd) {
					regmap_write(dev->map, DW_IC_TX_TL, 2 * (buf_len - 1));
					regmap_write(dev->map, DW_IC_RX_TL, 2 * (buf_len - 1));
					/*
					 * Need to check the stop bit. However, it cannot be
					 * detected from the registers, so we always check it
					 * when reading/writing the last byte.
					 */
					status = i2c_dw_status(dev);
					if (status)
						return status;

					for (data_idx = 0; data_idx < buf_len; data_idx++) {
						regmap_read(dev->map, DW_IC_DATA_CMD, &val);
						tx_buf[data_idx] = val;
					}
					status = i2c_dw_check_stopbit(dev);
					if (status)
						return status;
				}
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD, *tx_buf++ | cmd);
				usleep_range(AMD_TIMEOUT_MIN_US, AMD_TIMEOUT_MAX_US);
			}
		}
		status = i2c_dw_check_stopbit(dev);
		if (status)
			return status;
	}

	return 0;
}

/*
 * Initiate (and continue) a low level master read/write transaction.
 * This function is called from the transfer handler and pumps i2c_msg
 * messages into the Tx buffer. It handles everything even if the i2c_msg
 * data is longer than the Tx buffer.
 */
static void
i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 intr_mask;
	int tx_limit, rx_limit;
	u32 addr = msgs[dev->msg_write_idx].addr;
	u32 buf_len = dev->tx_buf_len;
	u8 *buf = dev->tx_buf;
	bool need_restart = false;
	unsigned int flr;

	intr_mask = DW_IC_INTR_MASTER_MASK;

	for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
		u32 flags = msgs[dev->msg_write_idx].flags;

		/*
		 * If the target address has changed, we would need to
		 * reprogram the target address in the I2C adapter, which is
		 * not supported in the middle of a transfer, so fail the
		 * request.
		 */
		if (msgs[dev->msg_write_idx].addr != addr) {
			dev_err(dev->dev,
				"%s: invalid target address\n", __func__);
			dev->msg_err = -EINVAL;
			break;
		}

		if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
			/* new i2c_msg */
			buf = msgs[dev->msg_write_idx].buf;
			buf_len = msgs[dev->msg_write_idx].len;

			/*
			 * If both IC_EMPTYFIFO_HOLD_MASTER_EN and
			 * IC_RESTART_EN are set, we must manually
			 * set the restart bit between messages.
			 */
			if ((dev->master_cfg & DW_IC_CON_RESTART_EN) &&
					(dev->msg_write_idx > 0))
				need_restart = true;
		}

		regmap_read(dev->map, DW_IC_TXFLR, &flr);
		tx_limit = dev->tx_fifo_depth - flr;

		regmap_read(dev->map, DW_IC_RXFLR, &flr);
		rx_limit = dev->rx_fifo_depth - flr;

		while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
			u32 cmd = 0;

			/*
			 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set we must
			 * manually set the stop bit. However, it cannot be
			 * detected from the registers, so we always set it
			 * when writing/reading the last byte.
			 */

			/*
			 * i2c-core always sets the buffer length of
			 * I2C_FUNC_SMBUS_BLOCK_DATA to 1. The length will
			 * be adjusted when receiving the first byte.
			 * Thus we can't stop the transaction here.
			 */
			if (dev->msg_write_idx == dev->msgs_num - 1 &&
			    buf_len == 1 && !(flags & I2C_M_RECV_LEN))
				cmd |= BIT(9);

			if (need_restart) {
				cmd |= BIT(10);
				need_restart = false;
			}

			if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {

				/* Avoid rx buffer overrun */
				if (dev->rx_outstanding >= dev->rx_fifo_depth)
					break;

				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | 0x100);
				rx_limit--;
				dev->rx_outstanding++;
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | *buf++);
			}
			tx_limit--; buf_len--;
		}

		dev->tx_buf = buf;
		dev->tx_buf_len = buf_len;

		/*
		 * Because we don't know the buffer length in the
		 * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop the
		 * transaction here. Also disable the TX_EMPTY IRQ
		 * while waiting for the data length byte to avoid a
		 * flood of bogus interrupts.
		 */
		if (flags & I2C_M_RECV_LEN) {
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			intr_mask &= ~DW_IC_INTR_TX_EMPTY;
			break;
		} else if (buf_len > 0) {
			/* more bytes to be written */
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			break;
		} else
			dev->status &= ~STATUS_WRITE_IN_PROGRESS;
	}

	/*
	 * If the i2c_msg index search is completed, we don't need the
	 * TX_EMPTY interrupt any more.
	 */
	if (dev->msg_write_idx == dev->msgs_num)
		intr_mask &= ~DW_IC_INTR_TX_EMPTY;

	if (dev->msg_err)
		intr_mask = 0;

	__i2c_dw_write_intr_mask(dev, intr_mask);
}

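/*
 * Called when the length byte of an SMBus block read has been received:
 * fix up the message length and re-enable TX_EMPTY to resume the transfer.
 */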
static u8
i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 flags = msgs[dev->msg_read_idx].flags;
	unsigned int intr_mask;

	/*
	 * Adjust the buffer length and mask the flag
	 * after receiving the first byte.
	 */
	len += (flags & I2C_CLIENT_PEC) ? 2 : 1;
	dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding);
	msgs[dev->msg_read_idx].len = len;
	msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;

	/*
	 * The buffer length has been received; re-enable the TX_EMPTY
	 * interrupt to resume the SMBus transaction.
	 */
	__i2c_dw_read_intr_mask(dev, &intr_mask);
	intr_mask |= DW_IC_INTR_TX_EMPTY;
	__i2c_dw_write_intr_mask(dev, intr_mask);

	return len;
}

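/*
 * Drain the Rx FIFO into the current read message(s). Progress is saved in
 * dev->rx_buf/rx_buf_len when the FIFO runs dry in the middle of a message.
 */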
static void
i2c_dw_read(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	unsigned int rx_valid;

	for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) {
		unsigned int tmp;
		u32 len;
		u8 *buf;

		if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
			continue;

		if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
			len = msgs[dev->msg_read_idx].len;
			buf = msgs[dev->msg_read_idx].buf;
		} else {
			len = dev->rx_buf_len;
			buf = dev->rx_buf;
		}

		regmap_read(dev->map, DW_IC_RXFLR, &rx_valid);

		for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
			u32 flags = msgs[dev->msg_read_idx].flags;

			regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
			tmp &= DW_IC_DATA_CMD_DAT;
			/* Ensure the length byte is a valid value */
			if (flags & I2C_M_RECV_LEN) {
				/*
				 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set, which cannot be
				 * detected from the registers, the controller can be
				 * disabled if the STOP bit is set. But it is only set
				 * after receiving the block data response length in the
				 * I2C_FUNC_SMBUS_BLOCK_DATA case. So, when the block data
				 * response length is invalid, another byte with the STOP
				 * bit set needs to be read to complete the transaction.
				 */
				if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX)
					tmp = 1;

				len = i2c_dw_recv_len(dev, tmp);
			}
			*buf++ = tmp;
			dev->rx_outstanding--;
		}

		if (len > 0) {
			dev->status |= STATUS_READ_IN_PROGRESS;
			dev->rx_buf_len = len;
			dev->rx_buf = buf;
			return;
		} else
			dev->status &= ~STATUS_READ_IN_PROGRESS;
	}
}

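/*
 * Read the pending interrupt status and acknowledge the handled bits via the
 * dedicated IC_CLR_* registers. Returns the status bits that were set.
 */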
static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
{
	unsigned int stat, dummy;

	/*
	 * The IC_INTR_STAT register just indicates "enabled" interrupts.
	 * The unmasked raw version of the interrupt status bits is available
	 * in the IC_RAW_INTR_STAT register.
	 *
	 * That is,
	 *   stat = readl(IC_INTR_STAT);
	 * is equivalent to,
	 *   stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK);
	 *
	 * The raw version might be useful for debugging purposes.
	 */
	if (!(dev->flags & ACCESS_POLLING)) {
		regmap_read(dev->map, DW_IC_INTR_STAT, &stat);
	} else {
		regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
		stat &= dev->sw_mask;
	}

	/*
	 * Do not use the IC_CLR_INTR register to clear interrupts, or
	 * you'll miss some interrupts, triggered during the period from
	 * readl(IC_INTR_STAT) to readl(IC_CLR_INTR).
	 *
	 * Instead, use the separately-prepared IC_CLR_* registers.
	 */
	if (stat & DW_IC_INTR_RX_UNDER)
		regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy);
	if (stat & DW_IC_INTR_RX_OVER)
		regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy);
	if (stat & DW_IC_INTR_TX_OVER)
		regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy);
	if (stat & DW_IC_INTR_RD_REQ)
		regmap_read(dev->map, DW_IC_CLR_RD_REQ, &dummy);
	if (stat & DW_IC_INTR_TX_ABRT) {
		/*
		 * The IC_TX_ABRT_SOURCE register is cleared whenever
		 * the IC_CLR_TX_ABRT is read.  Preserve it beforehand.
		 */
		regmap_read(dev->map, DW_IC_TX_ABRT_SOURCE, &dev->abort_source);
		regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy);
	}
	if (stat & DW_IC_INTR_RX_DONE)
		regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy);
	if (stat & DW_IC_INTR_ACTIVITY)
		regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy);
	if ((stat & DW_IC_INTR_STOP_DET) &&
	    ((dev->rx_outstanding == 0) || (stat & DW_IC_INTR_RX_FULL)))
		regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy);
	if (stat & DW_IC_INTR_START_DET)
		regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy);
	if (stat & DW_IC_INTR_GEN_CALL)
		regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy);

	return stat;
}

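/*
 * Handle the interrupt status of an ongoing transfer: service Tx aborts,
 * drain received data, refill the Tx FIFO and signal completion when the
 * transfer has finished.
 */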
static void i2c_dw_process_transfer(struct dw_i2c_dev *dev, unsigned int stat)
{
	if (stat & DW_IC_INTR_TX_ABRT) {
		dev->cmd_err |= DW_IC_ERR_TX_ABRT;
		dev->status &= ~STATUS_MASK;
		dev->rx_outstanding = 0;

		/*
		 * Anytime TX_ABRT is set, the contents of the tx/rx
		 * buffers are flushed. Make sure to skip them.
		 */
		__i2c_dw_write_intr_mask(dev, 0);
		goto tx_aborted;
	}

	if (stat & DW_IC_INTR_RX_FULL)
		i2c_dw_read(dev);

	if (stat & DW_IC_INTR_TX_EMPTY)
		i2c_dw_xfer_msg(dev);

	/*
	 * No need to modify or disable the interrupt mask here.
	 * i2c_dw_xfer_msg() will take care of it according to
	 * the current transmit status.
	 */

tx_aborted:
	if (((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) &&
	     (dev->rx_outstanding == 0))
		complete(&dev->cmd_complete);
	else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
		/* Workaround to trigger pending interrupt */
		__i2c_dw_read_intr_mask(dev, &stat);
		__i2c_dw_write_intr_mask(dev, 0);
		__i2c_dw_write_intr_mask(dev, stat);
	}
}

/*
 * Interrupt service routine. This gets called whenever an I2C master interrupt
 * occurs.
 */
static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
{
	struct dw_i2c_dev *dev = dev_id;
	unsigned int stat, enabled;

	regmap_read(dev->map, DW_IC_ENABLE, &enabled);
	regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
	if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
		return IRQ_NONE;
	if (pm_runtime_suspended(dev->dev) || stat == GENMASK(31, 0))
		return IRQ_NONE;
	dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat);

	stat = i2c_dw_read_clear_intrbits(dev);

	if (!(dev->status & STATUS_ACTIVE)) {
		/*
		 * Unexpected interrupt from the driver's point of view. The
		 * state variables are either unset or stale, so acknowledge
		 * and disable interrupts to suppress further ones in case
		 * the interrupt really came from this HW (e.g. the firmware
		 * has left the HW active).
		 */
		__i2c_dw_write_intr_mask(dev, 0);
		return IRQ_HANDLED;
	}

	i2c_dw_process_transfer(dev, stat);

	return IRQ_HANDLED;
}

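/*
 * Wait for the current transfer to complete, either by sleeping on the
 * completion (interrupt mode) or by polling and processing the interrupt
 * status in a loop (polling mode).
 */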
static int i2c_dw_wait_transfer(struct dw_i2c_dev *dev)
{
	unsigned long timeout = dev->adapter.timeout;
	unsigned int stat;
	int ret;

	if (!(dev->flags & ACCESS_POLLING)) {
		ret = wait_for_completion_timeout(&dev->cmd_complete, timeout);
	} else {
		timeout += jiffies;
		do {
			ret = try_wait_for_completion(&dev->cmd_complete);
			if (ret)
				break;

			stat = i2c_dw_read_clear_intrbits(dev);
			if (stat)
				i2c_dw_process_transfer(dev, stat);
			else
				/* Try to save some power */
				usleep_range(3, 25);
		} while (time_before(jiffies, timeout));
	}

	return ret ? 0 : -ETIMEDOUT;
}

/*
 * Prepare controller for a transaction and call i2c_dw_xfer_msg.
 */
static int
i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int ret;

	dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);

	pm_runtime_get_sync(dev->dev);

	switch (dev->flags & MODEL_MASK) {
	case MODEL_AMD_NAVI_GPU:
		ret = amd_i2c_dw_xfer_quirk(adap, msgs, num);
		goto done_nolock;
	default:
		break;
	}

	reinit_completion(&dev->cmd_complete);
	dev->msgs = msgs;
	dev->msgs_num = num;
	dev->cmd_err = 0;
	dev->msg_write_idx = 0;
	dev->msg_read_idx = 0;
	dev->msg_err = 0;
	dev->status = 0;
	dev->abort_source = 0;
	dev->rx_outstanding = 0;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		goto done_nolock;

	ret = i2c_dw_wait_bus_not_busy(dev);
	if (ret < 0)
		goto done;

	/* Start the transfers */
	i2c_dw_xfer_init(dev);

	/* Wait for tx to complete */
	ret = i2c_dw_wait_transfer(dev);
	if (ret) {
		dev_err(dev->dev, "controller timed out\n");
		/* i2c_dw_init_master() implicitly disables the adapter */
		i2c_recover_bus(&dev->adapter);
		i2c_dw_init_master(dev);
		goto done;
	}

	/*
	 * This happens rarely (~1:500) and is hard to reproduce. A debug
	 * trace showed that IC_STATUS had a value of 0x23 when STOP_DET
	 * occurred; disabling IC_ENABLE.ENABLE immediately in that state can
	 * result in IC_RAW_INTR_STAT.MASTER_ON_HOLD holding SCL low. Check
	 * whether the controller is still ACTIVE before disabling I2C.
	 */
	if (i2c_dw_is_controller_active(dev))
		dev_err(dev->dev, "controller active\n");

	/*
	 * We must disable the adapter before returning and signaling the end
	 * of the current transfer. Otherwise the hardware might continue
	 * generating interrupts which in turn causes a race condition with
	 * the following transfer. It needs some more investigation whether
	 * the additional interrupts are a hardware bug or this driver doesn't
	 * handle them correctly yet.
	 */
	__i2c_dw_disable_nowait(dev);

	if (dev->msg_err) {
		ret = dev->msg_err;
		goto done;
	}

	/* No error */
	if (likely(!dev->cmd_err && !dev->status)) {
		ret = num;
		goto done;
	}

	/* We have an error */
	if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
		ret = i2c_dw_handle_tx_abort(dev);
		goto done;
	}

	if (dev->status)
		dev_err(dev->dev,
			"transfer terminated early - interrupt latency too high?\n");

	ret = -EIO;

done:
	i2c_dw_release_lock(dev);

done_nolock:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}

static const struct i2c_algorithm i2c_dw_algo = {
	.master_xfer = i2c_dw_xfer,
	.functionality = i2c_dw_func,
};

static const struct i2c_adapter_quirks i2c_dw_quirks = {
	.flags = I2C_AQ_NO_ZERO_LEN,
};

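/* Set up the default master configuration, functionality and bus speed bits */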
void i2c_dw_configure_master(struct dw_i2c_dev *dev)
{
	struct i2c_timings *t = &dev->timings;

	dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;

	dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
			  DW_IC_CON_RESTART_EN;

	dev->mode = DW_IC_MASTER;

	switch (t->bus_freq_hz) {
	case I2C_MAX_STANDARD_MODE_FREQ:
		dev->master_cfg |= DW_IC_CON_SPEED_STD;
		break;
	case I2C_MAX_HIGH_SPEED_MODE_FREQ:
		dev->master_cfg |= DW_IC_CON_SPEED_HIGH;
		break;
	default:
		dev->master_cfg |= DW_IC_CON_SPEED_FAST;
	}
}
EXPORT_SYMBOL_GPL(i2c_dw_configure_master);

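/*
 * Bus recovery callbacks: disable the controller, assert its reset and gate
 * its clock before SCL/SDA are toggled via GPIOs, then restore and
 * reinitialize it afterwards.
 */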
static void i2c_dw_prepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_disable(dev);
	reset_control_assert(dev->rst);
	i2c_dw_prepare_clk(dev, false);
}

static void i2c_dw_unprepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_prepare_clk(dev, true);
	reset_control_deassert(dev->rst);
	i2c_dw_init_master(dev);
}

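/*
 * Set up GPIO based bus recovery if an "scl" GPIO (and optionally an "sda"
 * GPIO) is described for the device.
 */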
static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
{
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
	struct i2c_adapter *adap = &dev->adapter;
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "scl", GPIOD_OUT_HIGH);
	if (IS_ERR_OR_NULL(gpio))
		return PTR_ERR_OR_ZERO(gpio);

	rinfo->scl_gpiod = gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "sda", GPIOD_IN);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);
	rinfo->sda_gpiod = gpio;

	rinfo->pinctrl = devm_pinctrl_get(dev->dev);
	if (IS_ERR(rinfo->pinctrl)) {
		if (PTR_ERR(rinfo->pinctrl) == -EPROBE_DEFER)
			return PTR_ERR(rinfo->pinctrl);

		rinfo->pinctrl = NULL;
		dev_err(dev->dev, "getting pinctrl info failed: bus recovery might not work\n");
	} else if (!rinfo->pinctrl) {
		dev_dbg(dev->dev, "pinctrl is disabled, bus recovery might not work\n");
	}

	rinfo->recover_bus = i2c_generic_scl_recovery;
	rinfo->prepare_recovery = i2c_dw_prepare_recovery;
	rinfo->unprepare_recovery = i2c_dw_unprepare_recovery;
	adap->bus_recovery_info = rinfo;

	dev_info(dev->dev, "running with gpio recovery mode! scl%s",
		 rinfo->sda_gpiod ? ",sda" : "");

	return 0;
}

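/*
 * Probe time setup of the master: regmap, timings, FIFO sizes, optional bus
 * clear feature, IRQ or polling mode, bus recovery and adapter registration.
 */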
int i2c_dw_probe_master(struct dw_i2c_dev *dev)
{
	struct i2c_adapter *adap = &dev->adapter;
	unsigned long irq_flags;
	unsigned int ic_con;
	int ret;

	init_completion(&dev->cmd_complete);

	dev->init = i2c_dw_init_master;

	ret = i2c_dw_init_regmap(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_timings_master(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_fifo_size(dev);
	if (ret)
		return ret;

	/* Lock the bus for accessing DW_IC_CON */
	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/*
	 * On AMD platforms the BIOS advertises the bus clear feature and
	 * enables SCL/SDA stuck-low detection, while the SMU FW performs the
	 * bus recovery process. The driver should not ignore this BIOS
	 * advertisement of the bus clear feature.
	 */
	ret = regmap_read(dev->map, DW_IC_CON, &ic_con);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	if (ic_con & DW_IC_CON_BUS_CLEAR_CTRL)
		dev->master_cfg |= DW_IC_CON_BUS_CLEAR_CTRL;

	ret = dev->init(dev);
	if (ret)
		return ret;

	snprintf(adap->name, sizeof(adap->name),
		 "Synopsys DesignWare I2C adapter");
	adap->retries = 3;
	adap->algo = &i2c_dw_algo;
	adap->quirks = &i2c_dw_quirks;
	adap->dev.parent = dev->dev;
	i2c_set_adapdata(adap, dev);

	if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
		irq_flags = IRQF_NO_SUSPEND;
	} else {
		irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND;
	}

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	__i2c_dw_write_intr_mask(dev, 0);
	i2c_dw_release_lock(dev);

	if (!(dev->flags & ACCESS_POLLING)) {
		ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr,
				       irq_flags, dev_name(dev->dev), dev);
		if (ret) {
			dev_err(dev->dev, "failure requesting irq %i: %d\n",
				dev->irq, ret);
			return ret;
		}
	}

	ret = i2c_dw_init_recovery_info(dev);
	if (ret)
		return ret;

	/*
	 * Increment the PM usage count during adapter registration in order
	 * to avoid a possible spurious runtime suspend when the adapter
	 * device is registered to the device core, and an immediate resume
	 * in case the bus has registered I2C slaves that do I2C transfers in
	 * their probe.
	 */
	pm_runtime_get_noresume(dev->dev);
	ret = i2c_add_numbered_adapter(adap);
	if (ret)
		dev_err(dev->dev, "failure adding adapter: %d\n", ret);
	pm_runtime_put_noidle(dev->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(i2c_dw_probe_master);

MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("I2C_DW_COMMON");