// SPDX-License-Identifier: GPL-2.0
/*
 * i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
 *
 * Copyright (C) 2011 Weinmann Medical GmbH
 * Author: Nikolaus Voss <n.voss@weinmann.de>
 *
 * Evolved from original work by:
 * Copyright (C) 2004 Rick Bronson
 * Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
 *
 * Borrowed heavily from original work by:
 * Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dma-atmel.h>
#include <linux/pm_runtime.h>

#include "i2c-at91.h"
void at91_init_twi_bus_master(struct at91_twi_dev *dev)
{
	struct at91_twi_pdata *pdata = dev->pdata;
	u32 filtr = 0;

	/* FIFO should be enabled immediately after the software reset */
	if (dev->fifo_size)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_FIFOEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVDIS);
	at91_twi_write(dev, AT91_TWI_CWGR, dev->twi_cwgr_reg);

	/* enable digital filter */
	if (pdata->has_dig_filtr && dev->enable_dig_filt)
		filtr |= AT91_TWI_FILTR_FILT;

	/* enable advanced digital filter */
	if (pdata->has_adv_dig_filtr && dev->enable_dig_filt)
		filtr |= AT91_TWI_FILTR_FILT |
			 (AT91_TWI_FILTR_THRES(dev->filter_width) &
			  AT91_TWI_FILTR_THRES_MASK);

	/* enable analog filter */
	if (pdata->has_ana_filtr && dev->enable_ana_filt)
		filtr |= AT91_TWI_FILTR_PADFEN;

	if (filtr)
		at91_twi_write(dev, AT91_TWI_FILTR, filtr);
}

/*
 * Calculate symmetric clock as stated in datasheet:
 * twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
 */
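/*
 * Worked example with hypothetical numbers (not taken from any datasheet):
 * with F_MAIN = 66 MHz, a 100 kHz bus and offset = 4, the code below gets
 * div = DIV_ROUND_UP(66000000, 200000) - 4 = 326, so ckdiv = fls(326 >> 8)
 * = 1 and cdiv = 326 >> 1 = 163. Indeed 66 MHz / (2 * (163 * 2 + 4)) is
 * exactly 100 kHz.
 */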
static void at91_calc_twi_clock(struct at91_twi_dev *dev)
{
	int ckdiv, cdiv, div, hold = 0, filter_width = 0;
	struct at91_twi_pdata *pdata = dev->pdata;
	int offset = pdata->clk_offset;
	int max_ckdiv = pdata->clk_max_div;
	struct i2c_timings timings, *t = &timings;

	i2c_parse_fw_timings(dev->dev, t, true);

	div = max(0, (int)DIV_ROUND_UP(clk_get_rate(dev->clk),
				       2 * t->bus_freq_hz) - offset);
	ckdiv = fls(div >> 8);
	cdiv = div >> ckdiv;

	if (ckdiv > max_ckdiv) {
		dev_warn(dev->dev, "%d exceeds ckdiv max value which is %d.\n",
			 ckdiv, max_ckdiv);
		ckdiv = max_ckdiv;
		cdiv = 255;
	}

	if (pdata->has_hold_field) {
		/*
		 * hold time = HOLD + 3 x T_peripheral_clock
		 * Use clk rate in kHz to prevent overflows when computing
		 * hold.
		 */
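		/*
		 * E.g. (hypothetical values): a 100 MHz peripheral clock and
		 * sda_hold_ns = 300 give DIV_ROUND_UP(300 * 100000, 1000000)
		 * = 30, i.e. HOLD = 27 once the 3 T_peripheral_clock cycles
		 * are subtracted below.
		 */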
		hold = DIV_ROUND_UP(t->sda_hold_ns
				    * (clk_get_rate(dev->clk) / 1000), 1000000);
		hold -= 3;
		if (hold < 0)
			hold = 0;
		if (hold > AT91_TWI_CWGR_HOLD_MAX) {
			dev_warn(dev->dev,
				 "HOLD field set to its maximum value (%d instead of %d)\n",
				 AT91_TWI_CWGR_HOLD_MAX, hold);
			hold = AT91_TWI_CWGR_HOLD_MAX;
		}
	}

	if (pdata->has_adv_dig_filtr) {
		/*
		 * filter width = 0 to AT91_TWI_FILTR_THRES_MAX
		 * peripheral clocks
		 */
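		/*
		 * E.g. (hypothetical values): a 100 MHz peripheral clock and
		 * digital_filter_width_ns = 50 give
		 * DIV_ROUND_UP(50 * 100000, 1000000) = 5 peripheral clocks.
		 */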
		filter_width = DIV_ROUND_UP(t->digital_filter_width_ns
				* (clk_get_rate(dev->clk) / 1000), 1000000);
		if (filter_width > AT91_TWI_FILTR_THRES_MAX) {
			dev_warn(dev->dev,
				 "Filter threshold set to its maximum value (%d instead of %d)\n",
				 AT91_TWI_FILTR_THRES_MAX, filter_width);
			filter_width = AT91_TWI_FILTR_THRES_MAX;
		}
	}

	dev->twi_cwgr_reg = (ckdiv << 16) | (cdiv << 8) | cdiv
			    | AT91_TWI_CWGR_HOLD(hold);

	dev->filter_width = filter_width;

	dev_dbg(dev->dev, "cdiv %d ckdiv %d hold %d (%d ns), filter_width %d (%d ns)\n",
		cdiv, ckdiv, hold, t->sda_hold_ns, filter_width,
		t->digital_filter_width_ns);
}

static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
{
	struct at91_twi_dma *dma = &dev->dma;

	at91_twi_irq_save(dev);

	if (dma->xfer_in_progress) {
		if (dma->direction == DMA_FROM_DEVICE)
			dmaengine_terminate_all(dma->chan_rx);
		else
			dmaengine_terminate_all(dma->chan_tx);
		dma->xfer_in_progress = false;
	}
	if (dma->buf_mapped) {
		dma_unmap_single(dev->dev, sg_dma_address(&dma->sg[0]),
				 dev->buf_len, dma->direction);
		dma->buf_mapped = false;
	}

	at91_twi_irq_restore(dev);
}

static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
{
	if (!dev->buf_len)
		return;

	/* 8bit write works with and without FIFO */
	writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR);

	/* send stop when last byte has been written */
	if (--dev->buf_len == 0) {
		if (!dev->use_alt_cmd)
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
		at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY);
	}

	dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len);

	++dev->buf;
}

static void at91_twi_write_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
			 dev->buf_len, DMA_TO_DEVICE);

	/*
	 * When this callback is called, THR/TX FIFO is likely not to be empty
	 * yet. So we have to wait for the TXCOMP or NACK bits to be set in
	 * the Status Register to be sure that the STOP bit has been sent and
	 * the transfer is completed. The NACK interrupt has already been
	 * enabled; we just have to enable the TXCOMP one.
	 */
	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	if (!dev->use_alt_cmd)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
}

static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *txdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_tx = dma->chan_tx;
	unsigned int sg_len = 1;

	if (!dev->buf_len)
		return;

	dma->direction = DMA_TO_DEVICE;

	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	if (dev->fifo_size) {
		size_t part1_len, part2_len;
		struct scatterlist *sg;
		unsigned fifo_mr;

		sg_len = 0;

		part1_len = dev->buf_len & ~0x3;
		if (part1_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part1_len;
			sg_dma_address(sg) = dma_addr;
		}

		part2_len = dev->buf_len & 0x3;
		if (part2_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part2_len;
			sg_dma_address(sg) = dma_addr + part1_len;
		}
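
		/*
		 * E.g. a hypothetical 11-byte buffer is split above into a
		 * word-aligned 8-byte part (11 & ~0x3) and a 3-byte tail
		 * (11 & 0x3), each with its own scatterlist entry.
		 */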

		/*
		 * DMA controller is triggered when at least 4 data can be
		 * written into the TX FIFO
		 */
		fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
		fifo_mr &= ~AT91_TWI_FMR_TXRDYM_MASK;
		fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_FOUR_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
	} else {
		sg_dma_len(&dma->sg[0]) = dev->buf_len;
		sg_dma_address(&dma->sg[0]) = dma_addr;
	}

	txdesc = dmaengine_prep_slave_sg(chan_tx, dma->sg, sg_len,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	txdesc->callback = at91_twi_write_data_dma_callback;
	txdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(txdesc);
	dma_async_issue_pending(chan_tx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}

static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
{
	/*
	 * If we get here with no data expected, it means there is garbage
	 * data in RHR, so discard it.
	 */
	if (!dev->buf_len) {
		at91_twi_read(dev, AT91_TWI_RHR);
		return;
	}

	/* 8bit read works with and without FIFO */
	*dev->buf = readb_relaxed(dev->base + AT91_TWI_RHR);
	--dev->buf_len;

	/* return if aborting, we only needed to read RHR to clear RXRDY */
	if (dev->recv_len_abort)
		return;

	/* handle I2C_SMBUS_BLOCK_DATA */
	if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
		/* ensure length byte is a valid value */
		if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) {
			dev->msg->flags &= ~I2C_M_RECV_LEN;
			dev->buf_len += *dev->buf;
			dev->msg->len = dev->buf_len + 1;
			dev_dbg(dev->dev, "received block length %zu\n",
				dev->buf_len);
		} else {
			/* abort and send the stop by reading one more byte */
			dev->recv_len_abort = true;
			dev->buf_len = 1;
		}
	}
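	/*
	 * E.g. if the slave sent a hypothetical length byte of 4, buf_len
	 * grows by 4 above so that four more data bytes are read, and
	 * msg->len becomes 5 (length byte plus data).
	 */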

	/* send stop if the second-to-last byte has been read */
	if (!dev->use_alt_cmd && dev->buf_len == 1)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);

	dev_dbg(dev->dev, "read 0x%x, to go %zu\n", *dev->buf, dev->buf_len);

	++dev->buf;
}

static void at91_twi_read_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
	unsigned ier = AT91_TWI_TXCOMP;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
			 dev->buf_len, DMA_FROM_DEVICE);

	if (!dev->use_alt_cmd) {
		/* The last two bytes have to be read without using dma */
		dev->buf += dev->buf_len - 2;
		dev->buf_len = 2;
		ier |= AT91_TWI_RXRDY;
	}
	at91_twi_write(dev, AT91_TWI_IER, ier);
}

static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *rxdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_rx = dma->chan_rx;
	size_t buf_len;

	buf_len = (dev->use_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
	dma->direction = DMA_FROM_DEVICE;

	/* Keep in mind that we won't use dma to read the last two bytes */
	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, buf_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	if (dev->fifo_size && IS_ALIGNED(buf_len, 4)) {
		unsigned fifo_mr;

		/*
		 * DMA controller is triggered when at least 4 data can be
		 * read from the RX FIFO
		 */
		fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
		fifo_mr &= ~AT91_TWI_FMR_RXRDYM_MASK;
		fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_FOUR_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
	}

	sg_dma_len(&dma->sg[0]) = buf_len;
	sg_dma_address(&dma->sg[0]) = dma_addr;

	rxdesc = dmaengine_prep_slave_sg(chan_rx, dma->sg, 1, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	rxdesc->callback = at91_twi_read_data_dma_callback;
	rxdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(rxdesc);
	dma_async_issue_pending(dma->chan_rx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}

static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
{
	struct at91_twi_dev *dev = dev_id;
	const unsigned status = at91_twi_read(dev, AT91_TWI_SR);
	const unsigned irqstatus = status & at91_twi_read(dev, AT91_TWI_IMR);

	if (!irqstatus)
		return IRQ_NONE;
	/*
	 * In reception, the behavior of the twi device (before sama5d2) is
	 * weird. There is some magic about the RXRDY flag! When a data byte
	 * has been almost received, the reception of a new one is anticipated
	 * if there is no stop command to send. That is why we ask for the
	 * stop command to be sent not on the last data byte but on the
	 * second-to-last one.
	 *
	 * Unfortunately, we could still have the RXRDY flag set even if the
	 * transfer is done and we have read the last data byte. It might
	 * happen when the i2c slave device sends data too quickly after
	 * receiving the ack from the master. The data has been almost
	 * received before the order to send stop. In this case, sending the
	 * stop command could cause an RXRDY interrupt together with a TXCOMP
	 * one. It is better to handle the RXRDY interrupt first, so as not to
	 * leave garbage data in the Receive Holding Register for the next
	 * transfer.
	 */
	if (irqstatus & AT91_TWI_RXRDY) {
		/*
		 * Read all available bytes at once by polling RXRDY; this is
		 * usable with and without FIFO. With the FIFO enabled we
		 * could also read RXFL and avoid polling RXRDY.
		 */
		do {
			at91_twi_read_next_byte(dev);
		} while (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY);
	}

	/*
	 * When a NACK condition is detected, the I2C controller sets the NACK,
	 * TXCOMP and TXRDY bits all together in the Status Register (SR).
	 *
	 * 1 - Handling NACK errors with CPU write transfer.
	 *
	 * In such a case, we should not write the next byte into the Transmit
	 * Holding Register (THR); otherwise the I2C controller would start a
	 * new transfer and the I2C slave is likely to reply with another NACK.
	 *
	 * 2 - Handling NACK errors with DMA write transfer.
	 *
	 * By setting the TXRDY bit in the SR, the I2C controller also triggers
	 * the DMA controller to write the next data into the THR. Then the
	 * result depends on the hardware version of the I2C controller.
	 *
	 * 2a - Without support of the Alternative Command mode.
	 *
	 * This is the worst case: the DMA controller is triggered to write the
	 * next data into the THR, hence starting a new transfer: the I2C slave
	 * is likely to reply with another NACK.
	 * Concurrently, this interrupt handler is likely to be called to manage
	 * the first NACK before the I2C controller detects the second NACK and
	 * sets the NACK bit in the SR once again.
	 * When handling the first NACK, this interrupt handler disables the I2C
	 * controller interrupts, especially the NACK interrupt.
	 * Hence, the NACK bit is left pending in the SR. This is why we should
	 * read the SR to clear all pending interrupts at the beginning of
	 * at91_do_twi_transfer() before actually starting a new transfer.
	 *
	 * 2b - With support of the Alternative Command mode.
	 *
	 * When a NACK condition is detected, the I2C controller also locks the
	 * THR (and sets the LOCK bit in the SR): even though the DMA controller
	 * is triggered by the TXRDY bit to write the next data into the THR,
	 * this data actually won't go on the I2C bus, hence a second NACK is
	 * not generated.
	 */
	if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
		at91_disable_twi_interrupts(dev);
		complete(&dev->cmd_complete);
	} else if (irqstatus & AT91_TWI_TXRDY) {
		at91_twi_write_next_byte(dev);
	}

	/* catch error flags */
	dev->transfer_status |= status;

	return IRQ_HANDLED;
}

static int at91_do_twi_transfer(struct at91_twi_dev *dev)
{
	int ret;
	unsigned long time_left;
	bool has_unre_flag = dev->pdata->has_unre_flag;
	bool has_alt_cmd = dev->pdata->has_alt_cmd;

	/*
	 * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
	 * read flag but shows the state of the transmission at the time the
	 * Status Register is read. According to the programmer datasheet,
	 * TXCOMP is set when both the holding register and the internal
	 * shifter are empty and the STOP condition has been sent.
	 * Consequently, we should enable the NACK interrupt rather than TXCOMP
	 * to detect transmission failure.
	 * Indeed, let's take the case of an i2c write command using DMA.
	 * Whenever the slave doesn't acknowledge a byte, the LOCK, NACK and
	 * TXCOMP bits are set together in the Status Register.
	 * LOCK is a clear on write bit, which is set to prevent the DMA
	 * controller from sending new data on the i2c bus after a NACK
	 * condition has happened. Once locked, this i2c peripheral stops
	 * triggering the DMA controller for new data but it is more than
	 * likely that a new DMA transaction is already in progress, writing
	 * into the Transmit Holding Register. Since the peripheral is locked,
	 * these new data won't be sent to the i2c bus but will remain in the
	 * Transmit Holding Register, so the TXCOMP bit is cleared.
	 * Then when the interrupt handler is called, the Status Register is
	 * read: the TXCOMP bit is clear but the NACK bit is still set. The
	 * driver then manages the error properly, without waiting for a
	 * timeout.
	 * This case can be reproduced easily when writing into an at24 eeprom.
	 *
	 * Besides, the TXCOMP bit is already set before the i2c transaction
	 * has been started. For read transactions, this bit is cleared when
	 * writing the START bit into the Control Register. So the
	 * corresponding interrupt can safely be enabled just after.
	 * However, for write transactions managed by the CPU, we first write
	 * into THR, so TXCOMP is cleared. Then we can safely enable the
	 * TXCOMP interrupt. If the TXCOMP interrupt were enabled before
	 * writing into THR, the interrupt handler would be called immediately
	 * and the i2c command would be reported as completed.
	 * Also, when a write transaction is managed by the DMA controller,
	 * enabling the TXCOMP interrupt in this function may lead to a race
	 * condition since we don't know whether the TXCOMP interrupt is
	 * enabled before or after the DMA has started to write into THR. So
	 * the TXCOMP interrupt is enabled later by
	 * at91_twi_write_data_dma_callback(). Immediately after, in that DMA
	 * callback, if the alternative command mode is not used, we still
	 * need to send the STOP condition manually by writing the
	 * corresponding bit into the Control Register.
	 */

	dev_dbg(dev->dev, "transfer: %s %zu bytes.\n",
		(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);

	reinit_completion(&dev->cmd_complete);
	dev->transfer_status = 0;

	/* Clear pending interrupts, such as NACK. */
	at91_twi_read(dev, AT91_TWI_SR);

	if (dev->fifo_size) {
		unsigned fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);

		/* Reset FIFO mode register */
		fifo_mr &= ~(AT91_TWI_FMR_TXRDYM_MASK |
			     AT91_TWI_FMR_RXRDYM_MASK);
		fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_ONE_DATA);
		fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_ONE_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);

		/* Flush FIFOs */
		at91_twi_write(dev, AT91_TWI_CR,
			       AT91_TWI_THRCLR | AT91_TWI_RHRCLR);
	}

	if (!dev->buf_len) {
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
		at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	} else if (dev->msg->flags & I2C_M_RD) {
		unsigned start_flags = AT91_TWI_START;

		/* if only one byte is to be read, immediately stop transfer */
		if (!dev->use_alt_cmd && dev->buf_len <= 1 &&
		    !(dev->msg->flags & I2C_M_RECV_LEN))
			start_flags |= AT91_TWI_STOP;
		at91_twi_write(dev, AT91_TWI_CR, start_flags);
		/*
		 * When using dma without alternative command mode, the last
		 * byte has to be read manually so as not to send the stop
		 * command too late and then receive extra data.
		 * In practice, there are some issues if you use the dma to
		 * read n-1 bytes because of latency.
		 * Reading n-2 bytes with dma and the last two ones manually
		 * seems to be the best solution.
		 */
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_read_data_dma(dev);
		} else {
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP |
				       AT91_TWI_NACK |
				       AT91_TWI_RXRDY);
		}
	} else {
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_write_data_dma(dev);
		} else {
			at91_twi_write_next_byte(dev);
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP | AT91_TWI_NACK |
				       (dev->buf_len ? AT91_TWI_TXRDY : 0));
		}
	}

	time_left = wait_for_completion_timeout(&dev->cmd_complete,
						dev->adapter.timeout);
	if (time_left == 0) {
		dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR);
		dev_err(dev->dev, "controller timed out\n");
		at91_init_twi_bus(dev);
		ret = -ETIMEDOUT;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_NACK) {
		dev_dbg(dev->dev, "received nack\n");
		ret = -EREMOTEIO;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_OVRE) {
		dev_err(dev->dev, "overrun while reading\n");
		ret = -EIO;
		goto error;
	}
	if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
		dev_err(dev->dev, "underrun while writing\n");
		ret = -EIO;
		goto error;
	}
	if ((has_alt_cmd || dev->fifo_size) &&
	    (dev->transfer_status & AT91_TWI_LOCK)) {
		dev_err(dev->dev, "tx locked\n");
		ret = -EIO;
		goto error;
	}
	if (dev->recv_len_abort) {
		dev_err(dev->dev, "invalid smbus block length recvd\n");
		ret = -EPROTO;
		goto error;
	}

	dev_dbg(dev->dev, "transfer complete\n");

	return 0;

error:
	/* first stop DMA transfer if still in progress */
	at91_twi_dma_cleanup(dev);
	/* then flush THR/FIFO and unlock TX if locked */
	if ((has_alt_cmd || dev->fifo_size) &&
	    (dev->transfer_status & AT91_TWI_LOCK)) {
		dev_dbg(dev->dev, "unlock tx\n");
		at91_twi_write(dev, AT91_TWI_CR,
			       AT91_TWI_THRCLR | AT91_TWI_LOCKCLR);
	}

	/*
	 * some faulty I2C slave devices might hold SDA down;
	 * we can send a bus clear command, hoping that the pins will be
	 * released
	 */
	i2c_recover_bus(&dev->adapter);

	return ret;
}

static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
{
	struct at91_twi_dev *dev = i2c_get_adapdata(adap);
	int ret;
	unsigned int_addr_flag = 0;
	struct i2c_msg *m_start = msg;
	bool is_read;
	u8 *dma_buf = NULL;

	dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	if (num == 2) {
		int internal_address = 0;
		int i;

		/* 1st msg is put into the internal address, start with 2nd */
		m_start = &msg[1];
		for (i = 0; i < msg->len; ++i) {
			const unsigned addr = msg->buf[msg->len - 1 - i];

			internal_address |= addr << (8 * i);
			int_addr_flag += AT91_TWI_IADRSZ_1;
		}
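		/*
		 * E.g. a hypothetical 2-byte first message {0x12, 0x34}
		 * yields internal_address = 0x1234 and an IADRSZ of two
		 * bytes.
		 */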
		at91_twi_write(dev, AT91_TWI_IADR, internal_address);
	}

	dev->use_alt_cmd = false;
	is_read = (m_start->flags & I2C_M_RD);
	if (dev->pdata->has_alt_cmd) {
		if (m_start->len > 0 &&
		    m_start->len < AT91_I2C_MAX_ALT_CMD_DATA_SIZE) {
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN);
			at91_twi_write(dev, AT91_TWI_ACR,
				       AT91_TWI_ACR_DATAL(m_start->len) |
				       ((is_read) ? AT91_TWI_ACR_DIR : 0));
			dev->use_alt_cmd = true;
		} else {
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS);
		}
	}

	at91_twi_write(dev, AT91_TWI_MMR,
		       (m_start->addr << 16) |
		       int_addr_flag |
		       ((!dev->use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));

	dev->buf_len = m_start->len;
	dev->buf = m_start->buf;
	dev->msg = m_start;
	dev->recv_len_abort = false;

	if (dev->use_dma) {
		dma_buf = i2c_get_dma_safe_msg_buf(m_start, 1);
		if (!dma_buf) {
			ret = -ENOMEM;
			goto out;
		}
		dev->buf = dma_buf;
	}

	ret = at91_do_twi_transfer(dev);
	i2c_put_dma_safe_msg_buf(dma_buf, m_start, !ret);

	ret = (ret < 0) ? ret : num;
out:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}

/*
 * The hardware can handle at most two messages concatenated by a
 * repeated start via its internal address feature.
 */
static const struct i2c_adapter_quirks at91_twi_quirks = {
	.flags = I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | I2C_AQ_COMB_SAME_ADDR,
	.max_comb_1st_msg_len = 3,
};
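
/*
 * A typical combined transfer matching these quirks is a register read
 * (hypothetical example): a first write message of at most 3 bytes
 * carrying the register address, followed by a read message to the same
 * slave address; the first message then goes through AT91_TWI_IADR, see
 * at91_twi_xfer() above.
 */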

static u32 at91_twi_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
		| I2C_FUNC_SMBUS_READ_BLOCK_DATA;
}

static const struct i2c_algorithm at91_twi_algorithm = {
	.master_xfer = at91_twi_xfer,
	.functionality = at91_twi_func,
};

static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
{
	int ret = 0;
	struct dma_slave_config slave_config;
	struct at91_twi_dma *dma = &dev->dma;
	enum dma_slave_buswidth addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	/*
	 * The actual width of the access will be chosen in
	 * dmaengine_prep_slave_sg():
	 * for each buffer in the scatter-gather list, if its size is aligned
	 * to addr_width then addr_width accesses will be performed to transfer
	 * the buffer. On the other hand, if the buffer size is not aligned to
	 * addr_width then the buffer is transferred using single byte accesses.
	 * Please refer to the Atmel eXtended DMA controller driver.
	 * When FIFOs are used, the TXRDYM threshold can always be set to
	 * trigger the XDMAC when at least 4 data can be written into the TX
	 * FIFO, even if single byte accesses are performed.
	 * However the RXRDYM threshold must be set to fit the access width,
	 * deduced from buffer length, so the XDMAC is triggered properly to
	 * read data from the RX FIFO.
	 */
	if (dev->fifo_size)
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	memset(&slave_config, 0, sizeof(slave_config));
	slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
	slave_config.src_addr_width = addr_width;
	slave_config.src_maxburst = 1;
	slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
	slave_config.dst_addr_width = addr_width;
	slave_config.dst_maxburst = 1;
	slave_config.device_fc = false;

	dma->chan_tx = dma_request_chan(dev->dev, "tx");
	if (IS_ERR(dma->chan_tx)) {
		ret = PTR_ERR(dma->chan_tx);
		dma->chan_tx = NULL;
		goto error;
	}

	dma->chan_rx = dma_request_chan(dev->dev, "rx");
	if (IS_ERR(dma->chan_rx)) {
		ret = PTR_ERR(dma->chan_rx);
		dma->chan_rx = NULL;
		goto error;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
		dev_err(dev->dev, "failed to configure tx channel\n");
		ret = -EINVAL;
		goto error;
	}

	slave_config.direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
		dev_err(dev->dev, "failed to configure rx channel\n");
		ret = -EINVAL;
		goto error;
	}

	sg_init_table(dma->sg, 2);
	dma->buf_mapped = false;
	dma->xfer_in_progress = false;
	dev->use_dma = true;

	dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
		 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));

	return ret;

error:
	if (ret != -EPROBE_DEFER)
		dev_info(dev->dev, "can't get DMA channel, continue without DMA support\n");
	if (dma->chan_rx)
		dma_release_channel(dma->chan_rx);
	if (dma->chan_tx)
		dma_release_channel(dma->chan_tx);
	return ret;
}

static int at91_init_twi_recovery_gpio(struct platform_device *pdev,
				       struct at91_twi_dev *dev)
{
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;

	rinfo->pinctrl = devm_pinctrl_get(&pdev->dev);
	if (!rinfo->pinctrl || IS_ERR(rinfo->pinctrl)) {
		dev_info(dev->dev, "can't get pinctrl, bus recovery not supported\n");
		return PTR_ERR(rinfo->pinctrl);
	}
	dev->adapter.bus_recovery_info = rinfo;

	return 0;
}

static int at91_twi_recover_bus_cmd(struct i2c_adapter *adap)
{
	struct at91_twi_dev *dev = i2c_get_adapdata(adap);

	dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR);
	if (!(dev->transfer_status & AT91_TWI_SDA)) {
		dev_dbg(dev->dev, "SDA is down; sending bus clear command\n");
		if (dev->use_alt_cmd) {
			unsigned int acr;

			acr = at91_twi_read(dev, AT91_TWI_ACR);
			acr &= ~AT91_TWI_ACR_DATAL_MASK;
			at91_twi_write(dev, AT91_TWI_ACR, acr);
		}
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_CLEAR);
	}

	return 0;
}

static int at91_init_twi_recovery_info(struct platform_device *pdev,
				       struct at91_twi_dev *dev)
{
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
	bool has_clear_cmd = dev->pdata->has_clear_cmd;

	if (!has_clear_cmd)
		return at91_init_twi_recovery_gpio(pdev, dev);

	rinfo->recover_bus = at91_twi_recover_bus_cmd;
	dev->adapter.bus_recovery_info = rinfo;

	return 0;
}

int at91_twi_probe_master(struct platform_device *pdev,
			  u32 phy_addr, struct at91_twi_dev *dev)
{
	int rc;

	init_completion(&dev->cmd_complete);

	rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt, 0,
			      dev_name(dev->dev), dev);
	if (rc) {
		dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc);
		return rc;
	}

	if (dev->dev->of_node) {
		rc = at91_twi_configure_dma(dev, phy_addr);
		if (rc == -EPROBE_DEFER)
			return rc;
	}

	if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
				  &dev->fifo_size)) {
		dev_info(dev->dev, "Using FIFO (%u data)\n", dev->fifo_size);
	}

	dev->enable_dig_filt = of_property_read_bool(pdev->dev.of_node,
						     "i2c-digital-filter");

	dev->enable_ana_filt = of_property_read_bool(pdev->dev.of_node,
						     "i2c-analog-filter");
	at91_calc_twi_clock(dev);

	rc = at91_init_twi_recovery_info(pdev, dev);
	if (rc == -EPROBE_DEFER)
		return rc;

	dev->adapter.algo = &at91_twi_algorithm;
	dev->adapter.quirks = &at91_twi_quirks;

	return 0;
}