1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * serial_tegra.c
4 *
5 * High-speed serial driver for NVIDIA Tegra SoCs
6 *
7 * Copyright (c) 2012-2019, NVIDIA CORPORATION. All rights reserved.
8 *
9 * Author: Laxman Dewangan <ldewangan@nvidia.com>
10 */
11
12 #include <linux/clk.h>
13 #include <linux/debugfs.h>
14 #include <linux/delay.h>
15 #include <linux/dmaengine.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/dmapool.h>
18 #include <linux/err.h>
19 #include <linux/io.h>
20 #include <linux/irq.h>
21 #include <linux/module.h>
22 #include <linux/of.h>
23 #include <linux/of_device.h>
24 #include <linux/pagemap.h>
25 #include <linux/platform_device.h>
26 #include <linux/reset.h>
27 #include <linux/serial.h>
28 #include <linux/serial_8250.h>
29 #include <linux/serial_core.h>
30 #include <linux/serial_reg.h>
31 #include <linux/slab.h>
32 #include <linux/string.h>
33 #include <linux/termios.h>
34 #include <linux/tty.h>
35 #include <linux/tty_flip.h>
36
37 #define TEGRA_UART_TYPE "TEGRA_UART"
38 #define TX_EMPTY_STATUS (UART_LSR_TEMT | UART_LSR_THRE)
39 #define BYTES_TO_ALIGN(x) ((unsigned long)(x) & 0x3)
40
41 #define TEGRA_UART_RX_DMA_BUFFER_SIZE 4096
42 #define TEGRA_UART_LSR_TXFIFO_FULL 0x100
43 #define TEGRA_UART_IER_EORD 0x20
44 #define TEGRA_UART_MCR_RTS_EN 0x40
45 #define TEGRA_UART_MCR_CTS_EN 0x20
46 #define TEGRA_UART_LSR_ANY (UART_LSR_OE | UART_LSR_BI | \
47 UART_LSR_PE | UART_LSR_FE)
48 #define TEGRA_UART_IRDA_CSR 0x08
49 #define TEGRA_UART_SIR_ENABLED 0x80
50
51 #define TEGRA_UART_TX_PIO 1
52 #define TEGRA_UART_TX_DMA 2
53 #define TEGRA_UART_MIN_DMA 16
54 #define TEGRA_UART_FIFO_SIZE 32
55
/*
 * The Tx FIFO trigger level setting in the Tegra UART is reversed
 * compared to a conventional UART.
 */
60 #define TEGRA_UART_TX_TRIG_16B 0x00
61 #define TEGRA_UART_TX_TRIG_8B 0x10
62 #define TEGRA_UART_TX_TRIG_4B 0x20
63 #define TEGRA_UART_TX_TRIG_1B 0x30
64
65 #define TEGRA_UART_MAXIMUM 8
66
/* Default UART setting when started: 115200, no parity, 1 stop bit, 8 data bits */
68 #define TEGRA_UART_DEFAULT_BAUD 115200
69 #define TEGRA_UART_DEFAULT_LSR UART_LCR_WLEN8
70
71 /* Tx transfer mode */
72 #define TEGRA_TX_PIO 1
73 #define TEGRA_TX_DMA 2
74
75 #define TEGRA_UART_FCR_IIR_FIFO_EN 0x40
76
/**
 * struct tegra_uart_chip_data: SOC specific data.
 *
 * @tx_fifo_full_status: TX FIFO full status is available in the LSR.
 * @allow_txfifo_reset_fifo_mode: TX FIFO may be reset while FIFO mode is
 * enabled. Tegra30 does not allow this.
 * @support_clk_src_div: The clock source supports a clock divider.
 * @fifo_mode_enable_status: FIFO mode enable status can be read from the IIR.
 * @uart_max_port: Maximum number of UART ports on the SoC.
 * @max_dma_burst_bytes: Maximum DMA burst size in bytes.
 * @error_tolerance_low_range: Lowest allowed baud rate deviation, in percent.
 * @error_tolerance_high_range: Highest allowed baud rate deviation, in percent.
 */
85 struct tegra_uart_chip_data {
86 bool tx_fifo_full_status;
87 bool allow_txfifo_reset_fifo_mode;
88 bool support_clk_src_div;
89 bool fifo_mode_enable_status;
90 int uart_max_port;
91 int max_dma_burst_bytes;
92 int error_tolerance_low_range;
93 int error_tolerance_high_range;
94 };
95
96 struct tegra_baud_tolerance {
97 u32 lower_range_baud;
98 u32 upper_range_baud;
99 s32 tolerance;
100 };
101
102 struct tegra_uart_port {
103 struct uart_port uport;
104 const struct tegra_uart_chip_data *cdata;
105
106 struct clk *uart_clk;
107 struct reset_control *rst;
108 unsigned int current_baud;
109
110 /* Register shadow */
111 unsigned long fcr_shadow;
112 unsigned long mcr_shadow;
113 unsigned long lcr_shadow;
114 unsigned long ier_shadow;
115 bool rts_active;
116
117 int tx_in_progress;
118 unsigned int tx_bytes;
119
120 bool enable_modem_interrupt;
121
122 bool rx_timeout;
123 int rx_in_progress;
124 int symb_bit;
125
126 struct dma_chan *rx_dma_chan;
127 struct dma_chan *tx_dma_chan;
128 dma_addr_t rx_dma_buf_phys;
129 dma_addr_t tx_dma_buf_phys;
130 unsigned char *rx_dma_buf_virt;
131 unsigned char *tx_dma_buf_virt;
132 struct dma_async_tx_descriptor *tx_dma_desc;
133 struct dma_async_tx_descriptor *rx_dma_desc;
134 dma_cookie_t tx_cookie;
135 dma_cookie_t rx_cookie;
136 unsigned int tx_bytes_requested;
137 unsigned int rx_bytes_requested;
138 struct tegra_baud_tolerance *baud_tolerance;
139 int n_adjustable_baud_rates;
140 int required_rate;
141 int configured_rate;
142 bool use_rx_pio;
143 bool use_tx_pio;
144 bool rx_dma_active;
145 };
146
147 static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
148 static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup);
149 static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
150 bool dma_to_memory);
151
static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup,
unsigned long reg)
154 {
155 return readl(tup->uport.membase + (reg << tup->uport.regshift));
156 }
157
static inline void tegra_uart_write(struct tegra_uart_port *tup, unsigned val,
unsigned long reg)
160 {
161 writel(val, tup->uport.membase + (reg << tup->uport.regshift));
162 }
163
static inline struct tegra_uart_port *to_tegra_uport(struct uart_port *u)
165 {
166 return container_of(u, struct tegra_uart_port, uport);
167 }
168
static unsigned int tegra_uart_get_mctrl(struct uart_port *u)
170 {
171 struct tegra_uart_port *tup = to_tegra_uport(u);
172
/*
 * RI - Ring detector is active.
 * CD/DCD/CAR - Carrier detect is always active. Linux uses several
 * names for carrier detect.
 * DSR - Data Set Ready is reported as active since the hardware
 * doesn't support it. It is unclear whether Linux uses this yet.
 * CTS - Clear To Send. Always reported as active, as the hardware
 * handles CTS automatically.
 */
182 if (tup->enable_modem_interrupt)
183 return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
184 return TIOCM_CTS;
185 }
186
static void set_rts(struct tegra_uart_port *tup, bool active)
188 {
189 unsigned long mcr;
190
191 mcr = tup->mcr_shadow;
192 if (active)
193 mcr |= TEGRA_UART_MCR_RTS_EN;
194 else
195 mcr &= ~TEGRA_UART_MCR_RTS_EN;
196 if (mcr != tup->mcr_shadow) {
197 tegra_uart_write(tup, mcr, UART_MCR);
198 tup->mcr_shadow = mcr;
199 }
200 }
201
static void set_dtr(struct tegra_uart_port *tup, bool active)
203 {
204 unsigned long mcr;
205
206 mcr = tup->mcr_shadow;
207 if (active)
208 mcr |= UART_MCR_DTR;
209 else
210 mcr &= ~UART_MCR_DTR;
211 if (mcr != tup->mcr_shadow) {
212 tegra_uart_write(tup, mcr, UART_MCR);
213 tup->mcr_shadow = mcr;
214 }
215 }
216
static void set_loopbk(struct tegra_uart_port *tup, bool active)
218 {
219 unsigned long mcr = tup->mcr_shadow;
220
221 if (active)
222 mcr |= UART_MCR_LOOP;
223 else
224 mcr &= ~UART_MCR_LOOP;
225
226 if (mcr != tup->mcr_shadow) {
227 tegra_uart_write(tup, mcr, UART_MCR);
228 tup->mcr_shadow = mcr;
229 }
230 }
231
static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl)
233 {
234 struct tegra_uart_port *tup = to_tegra_uport(u);
235 int enable;
236
237 tup->rts_active = !!(mctrl & TIOCM_RTS);
238 set_rts(tup, tup->rts_active);
239
240 enable = !!(mctrl & TIOCM_DTR);
241 set_dtr(tup, enable);
242
243 enable = !!(mctrl & TIOCM_LOOP);
244 set_loopbk(tup, enable);
245 }
246
static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl)
248 {
249 struct tegra_uart_port *tup = to_tegra_uport(u);
250 unsigned long lcr;
251
252 lcr = tup->lcr_shadow;
253 if (break_ctl)
254 lcr |= UART_LCR_SBC;
255 else
256 lcr &= ~UART_LCR_SBC;
257 tegra_uart_write(tup, lcr, UART_LCR);
258 tup->lcr_shadow = lcr;
259 }
260
/**
 * tegra_uart_wait_cycle_time: Wait for N UART clock periods
 *
 * @tup: Tegra serial port data structure.
 * @cycles: Number of clock periods to wait.
 *
 * Tegra UARTs are clocked at 16X the baud/bit rate, so one UART clock
 * period lasts 1/(16 * baud rate) seconds.
 */
static void tegra_uart_wait_cycle_time(struct tegra_uart_port *tup,
unsigned int cycles)
272 {
273 if (tup->current_baud)
274 udelay(DIV_ROUND_UP(cycles * 1000000, tup->current_baud * 16));
275 }
276
277 /* Wait for a symbol-time. */
static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup,
unsigned int syms)
280 {
281 if (tup->current_baud)
282 udelay(DIV_ROUND_UP(syms * tup->symb_bit * 1000000,
283 tup->current_baud));
284 }
285
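/*
 * Poll the IIR FIFO enable status bit until the FIFO mode change has
 * taken effect, giving up after roughly 100 us.
 */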
static int tegra_uart_wait_fifo_mode_enabled(struct tegra_uart_port *tup)
287 {
288 unsigned long iir;
289 unsigned int tmout = 100;
290
291 do {
292 iir = tegra_uart_read(tup, UART_IIR);
293 if (iir & TEGRA_UART_FCR_IIR_FIFO_EN)
294 return 0;
295 udelay(1);
296 } while (--tmout);
297
298 return -ETIMEDOUT;
299 }
300
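/*
 * Clear the RX and/or TX FIFO selected by @fcr_bits. On chips that do not
 * allow clearing the TX FIFO while FIFO mode is enabled, the FIFO is
 * briefly disabled around the clear. RTS is dropped for the duration when
 * hardware flow control is in use, and the function waits for the flush
 * to complete before returning.
 */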
static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
302 {
303 unsigned long fcr = tup->fcr_shadow;
304 unsigned int lsr, tmout = 10000;
305
306 if (tup->rts_active)
307 set_rts(tup, false);
308
309 if (tup->cdata->allow_txfifo_reset_fifo_mode) {
310 fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
311 tegra_uart_write(tup, fcr, UART_FCR);
312 } else {
313 fcr &= ~UART_FCR_ENABLE_FIFO;
314 tegra_uart_write(tup, fcr, UART_FCR);
315 udelay(60);
316 fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
317 tegra_uart_write(tup, fcr, UART_FCR);
318 fcr |= UART_FCR_ENABLE_FIFO;
319 tegra_uart_write(tup, fcr, UART_FCR);
320 if (tup->cdata->fifo_mode_enable_status)
321 tegra_uart_wait_fifo_mode_enabled(tup);
322 }
323
324 /* Dummy read to ensure the write is posted */
325 tegra_uart_read(tup, UART_SCR);
326
327 /*
328 * For all tegra devices (up to t210), there is a hardware issue that
329 * requires software to wait for 32 UART clock periods for the flush
330 * to propagate, otherwise data could be lost.
331 */
332 tegra_uart_wait_cycle_time(tup, 32);
333
334 do {
335 lsr = tegra_uart_read(tup, UART_LSR);
336 if ((lsr & UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
337 break;
338 udelay(1);
339 } while (--tmout);
340
341 if (tup->rts_active)
342 set_rts(tup, true);
343 }
344
static long tegra_get_tolerance_rate(struct tegra_uart_port *tup,
unsigned int baud, long rate)
347 {
348 int i;
349
350 for (i = 0; i < tup->n_adjustable_baud_rates; ++i) {
351 if (baud >= tup->baud_tolerance[i].lower_range_baud &&
352 baud <= tup->baud_tolerance[i].upper_range_baud)
353 return (rate + (rate *
354 tup->baud_tolerance[i].tolerance) / 10000);
355 }
356
357 return rate;
358 }
359
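/*
 * Check that the rate the clock framework actually delivered is within the
 * SoC's allowed deviation from the requested rate. The tolerance fields
 * are in percent while the computed diff is in hundredths of a percent,
 * hence the factor of 100.
 */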
static int tegra_check_rate_in_range(struct tegra_uart_port *tup)
361 {
362 long diff;
363
364 diff = ((long)(tup->configured_rate - tup->required_rate) * 10000)
365 / tup->required_rate;
366 if (diff < (tup->cdata->error_tolerance_low_range * 100) ||
367 diff > (tup->cdata->error_tolerance_high_range * 100)) {
368 dev_err(tup->uport.dev,
369 "configured baud rate is out of range by %ld", diff);
370 return -EIO;
371 }
372
373 return 0;
374 }
375
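/*
 * Program a new baud rate. When the SoC supports a clock source divider,
 * the UART clock itself is set to 16x the baud rate (with an optional
 * per-range tolerance adjustment) and the internal divisor is left at 1;
 * otherwise the divisor is derived from the fixed clock rate. The divisor
 * registers are updated under the port lock with DLAB set.
 */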
static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
377 {
378 unsigned long rate;
379 unsigned int divisor;
380 unsigned long lcr;
381 unsigned long flags;
382 int ret;
383
384 if (tup->current_baud == baud)
385 return 0;
386
387 if (tup->cdata->support_clk_src_div) {
388 rate = baud * 16;
389 tup->required_rate = rate;
390
391 if (tup->n_adjustable_baud_rates)
392 rate = tegra_get_tolerance_rate(tup, baud, rate);
393
394 ret = clk_set_rate(tup->uart_clk, rate);
395 if (ret < 0) {
396 dev_err(tup->uport.dev,
397 "clk_set_rate() failed for rate %lu\n", rate);
398 return ret;
399 }
400 tup->configured_rate = clk_get_rate(tup->uart_clk);
401 divisor = 1;
402 ret = tegra_check_rate_in_range(tup);
403 if (ret < 0)
404 return ret;
405 } else {
406 rate = clk_get_rate(tup->uart_clk);
407 divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
408 }
409
410 spin_lock_irqsave(&tup->uport.lock, flags);
411 lcr = tup->lcr_shadow;
412 lcr |= UART_LCR_DLAB;
413 tegra_uart_write(tup, lcr, UART_LCR);
414
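/*
 * With DLAB set, offsets 0 and 1 (UART_TX and UART_IER here) address
 * the divisor latch low and high bytes.
 */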
415 tegra_uart_write(tup, divisor & 0xFF, UART_TX);
416 tegra_uart_write(tup, ((divisor >> 8) & 0xFF), UART_IER);
417
418 lcr &= ~UART_LCR_DLAB;
419 tegra_uart_write(tup, lcr, UART_LCR);
420
421 /* Dummy read to ensure the write is posted */
422 tegra_uart_read(tup, UART_SCR);
423 spin_unlock_irqrestore(&tup->uport.lock, flags);
424
425 tup->current_baud = baud;
426
427 /* wait two character intervals at new rate */
428 tegra_uart_wait_sym_time(tup, 2);
429 return 0;
430 }
431
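/*
 * Map LSR error bits to a tty flag, update the corresponding error counter
 * and queue a flagged character via uart_insert_char(). A break reported
 * with an empty FIFO additionally triggers an RX FIFO reset.
 */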
static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
unsigned long lsr)
434 {
435 char flag = TTY_NORMAL;
436
437 if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
438 if (lsr & UART_LSR_OE) {
/* Overrun error */
440 flag = TTY_OVERRUN;
441 tup->uport.icount.overrun++;
442 dev_dbg(tup->uport.dev, "Got overrun errors\n");
443 } else if (lsr & UART_LSR_PE) {
444 /* Parity error */
445 flag = TTY_PARITY;
446 tup->uport.icount.parity++;
447 dev_dbg(tup->uport.dev, "Got Parity errors\n");
448 } else if (lsr & UART_LSR_FE) {
449 flag = TTY_FRAME;
450 tup->uport.icount.frame++;
451 dev_dbg(tup->uport.dev, "Got frame errors\n");
452 } else if (lsr & UART_LSR_BI) {
453 /*
454 * Break error
455 * If FIFO read error without any data, reset Rx FIFO
456 */
457 if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
458 tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR);
459 if (tup->uport.ignore_status_mask & UART_LSR_BI)
460 return TTY_BREAK;
461 flag = TTY_BREAK;
462 tup->uport.icount.brk++;
463 dev_dbg(tup->uport.dev, "Got Break\n");
464 }
465 uart_insert_char(&tup->uport, lsr, UART_LSR_OE, 0, flag);
466 }
467
468 return flag;
469 }
470
static int tegra_uart_request_port(struct uart_port *u)
472 {
473 return 0;
474 }
475
static void tegra_uart_release_port(struct uart_port *u)
477 {
478 /* Nothing to do here */
479 }
480
static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes)
482 {
483 struct circ_buf *xmit = &tup->uport.state->xmit;
484 int i;
485
486 for (i = 0; i < max_bytes; i++) {
487 BUG_ON(uart_circ_empty(xmit));
488 if (tup->cdata->tx_fifo_full_status) {
489 unsigned long lsr = tegra_uart_read(tup, UART_LSR);
490 if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL))
491 break;
492 }
493 tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX);
494 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
495 tup->uport.icount.tx++;
496 }
497 }
498
static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup,
unsigned int bytes)
501 {
502 if (bytes > TEGRA_UART_MIN_DMA)
503 bytes = TEGRA_UART_MIN_DMA;
504
505 tup->tx_in_progress = TEGRA_UART_TX_PIO;
506 tup->tx_bytes = bytes;
507 tup->ier_shadow |= UART_IER_THRI;
508 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
509 }
510
static void tegra_uart_tx_dma_complete(void *args)
512 {
513 struct tegra_uart_port *tup = args;
514 struct circ_buf *xmit = &tup->uport.state->xmit;
515 struct dma_tx_state state;
516 unsigned long flags;
517 unsigned int count;
518
519 dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
520 count = tup->tx_bytes_requested - state.residue;
521 async_tx_ack(tup->tx_dma_desc);
522 spin_lock_irqsave(&tup->uport.lock, flags);
523 uart_xmit_advance(&tup->uport, count);
524 tup->tx_in_progress = 0;
525 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
526 uart_write_wakeup(&tup->uport);
527 tegra_uart_start_next_tx(tup);
528 spin_unlock_irqrestore(&tup->uport.lock, flags);
529 }
530
static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
unsigned long count)
533 {
534 struct circ_buf *xmit = &tup->uport.state->xmit;
535 dma_addr_t tx_phys_addr;
536
537 tup->tx_bytes = count & ~(0xF);
538 tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;
539
540 dma_sync_single_for_device(tup->uport.dev, tx_phys_addr,
541 tup->tx_bytes, DMA_TO_DEVICE);
542
543 tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
544 tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
545 DMA_PREP_INTERRUPT);
546 if (!tup->tx_dma_desc) {
547 dev_err(tup->uport.dev, "Not able to get desc for Tx\n");
548 return -EIO;
549 }
550
551 tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete;
552 tup->tx_dma_desc->callback_param = tup;
553 tup->tx_in_progress = TEGRA_UART_TX_DMA;
554 tup->tx_bytes_requested = tup->tx_bytes;
555 tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc);
556 dma_async_issue_pending(tup->tx_dma_chan);
557 return 0;
558 }
559
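/*
 * Kick off the next TX chunk. Small transfers (under TEGRA_UART_MIN_DMA
 * bytes), forced-PIO ports and the unaligned leading bytes of a buffer go
 * out via PIO; everything else is handed to the DMA engine, which
 * transfers a multiple of 16 bytes.
 */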
static void tegra_uart_start_next_tx(struct tegra_uart_port *tup)
561 {
562 unsigned long tail;
563 unsigned long count;
564 struct circ_buf *xmit = &tup->uport.state->xmit;
565
566 if (!tup->current_baud)
567 return;
568
569 tail = (unsigned long)&xmit->buf[xmit->tail];
570 count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
571 if (!count)
572 return;
573
574 if (tup->use_tx_pio || count < TEGRA_UART_MIN_DMA)
575 tegra_uart_start_pio_tx(tup, count);
576 else if (BYTES_TO_ALIGN(tail) > 0)
577 tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail));
578 else
579 tegra_uart_start_tx_dma(tup, count);
580 }
581
582 /* Called by serial core driver with u->lock taken. */
static void tegra_uart_start_tx(struct uart_port *u)
584 {
585 struct tegra_uart_port *tup = to_tegra_uport(u);
586 struct circ_buf *xmit = &u->state->xmit;
587
588 if (!uart_circ_empty(xmit) && !tup->tx_in_progress)
589 tegra_uart_start_next_tx(tup);
590 }
591
static unsigned int tegra_uart_tx_empty(struct uart_port *u)
593 {
594 struct tegra_uart_port *tup = to_tegra_uport(u);
595 unsigned int ret = 0;
596 unsigned long flags;
597
598 spin_lock_irqsave(&u->lock, flags);
599 if (!tup->tx_in_progress) {
600 unsigned long lsr = tegra_uart_read(tup, UART_LSR);
601 if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
602 ret = TIOCSER_TEMT;
603 }
604 spin_unlock_irqrestore(&u->lock, flags);
605 return ret;
606 }
607
static void tegra_uart_stop_tx(struct uart_port *u)
609 {
610 struct tegra_uart_port *tup = to_tegra_uport(u);
611 struct dma_tx_state state;
612 unsigned int count;
613
614 if (tup->tx_in_progress != TEGRA_UART_TX_DMA)
615 return;
616
617 dmaengine_pause(tup->tx_dma_chan);
618 dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
619 dmaengine_terminate_all(tup->tx_dma_chan);
620 count = tup->tx_bytes_requested - state.residue;
621 async_tx_ack(tup->tx_dma_desc);
622 uart_xmit_advance(&tup->uport, count);
623 tup->tx_in_progress = 0;
624 }
625
static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup)
627 {
628 struct circ_buf *xmit = &tup->uport.state->xmit;
629
630 tegra_uart_fill_tx_fifo(tup, tup->tx_bytes);
631 tup->tx_in_progress = 0;
632 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
633 uart_write_wakeup(&tup->uport);
634 tegra_uart_start_next_tx(tup);
635 }
636
static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
struct tty_port *port)
639 {
640 do {
641 char flag = TTY_NORMAL;
642 unsigned long lsr = 0;
643 unsigned char ch;
644
645 lsr = tegra_uart_read(tup, UART_LSR);
646 if (!(lsr & UART_LSR_DR))
647 break;
648
649 flag = tegra_uart_decode_rx_error(tup, lsr);
650 if (flag != TTY_NORMAL)
651 continue;
652
653 ch = (unsigned char) tegra_uart_read(tup, UART_RX);
654 tup->uport.icount.rx++;
655
656 if (uart_handle_sysrq_char(&tup->uport, ch))
657 continue;
658
659 if (tup->uport.ignore_status_mask & UART_LSR_DR)
660 continue;
661
662 tty_insert_flip_char(port, ch, flag);
663 } while (1);
664 }
665
static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
struct tty_port *port,
unsigned int count)
669 {
670 int copied;
671
672 /* If count is zero, then there is no data to be copied */
673 if (!count)
674 return;
675
676 tup->uport.icount.rx += count;
677
678 if (tup->uport.ignore_status_mask & UART_LSR_DR)
679 return;
680
681 dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
682 count, DMA_FROM_DEVICE);
683 copied = tty_insert_flip_string(port,
684 ((unsigned char *)(tup->rx_dma_buf_virt)), count);
685 if (copied != count) {
686 WARN_ON(1);
687 dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
688 }
689 dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
690 count, DMA_TO_DEVICE);
691 }
692
static void do_handle_rx_pio(struct tegra_uart_port *tup)
694 {
695 struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
696 struct tty_port *port = &tup->uport.state->port;
697
698 tegra_uart_handle_rx_pio(tup, port);
699 if (tty) {
700 tty_flip_buffer_push(port);
701 tty_kref_put(tty);
702 }
703 }
704
static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
unsigned int residue)
707 {
708 struct tty_port *port = &tup->uport.state->port;
709 unsigned int count;
710
711 async_tx_ack(tup->rx_dma_desc);
712 count = tup->rx_bytes_requested - residue;
713
714 /* If we are here, DMA is stopped */
715 tegra_uart_copy_rx_to_tty(tup, port, count);
716
717 do_handle_rx_pio(tup);
718 }
719
static void tegra_uart_rx_dma_complete(void *args)
721 {
722 struct tegra_uart_port *tup = args;
723 struct uart_port *u = &tup->uport;
724 unsigned long flags;
725 struct dma_tx_state state;
726 enum dma_status status;
727
728 spin_lock_irqsave(&u->lock, flags);
729
730 status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
731
732 if (status == DMA_IN_PROGRESS) {
733 dev_dbg(tup->uport.dev, "RX DMA is in progress\n");
734 goto done;
735 }
736
737 /* Deactivate flow control to stop sender */
738 if (tup->rts_active)
739 set_rts(tup, false);
740
741 tup->rx_dma_active = false;
742 tegra_uart_rx_buffer_push(tup, 0);
743 tegra_uart_start_rx_dma(tup);
744
745 /* Activate flow control to start transfer */
746 if (tup->rts_active)
747 set_rts(tup, true);
748
749 done:
750 spin_unlock_irqrestore(&u->lock, flags);
751 }
752
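/*
 * Stop an in-flight RX DMA transfer, push whatever it had already received
 * to the tty layer, and then drain any remaining bytes from the FIFO via
 * PIO.
 */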
static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
754 {
755 struct dma_tx_state state;
756
757 if (!tup->rx_dma_active) {
758 do_handle_rx_pio(tup);
759 return;
760 }
761
762 dmaengine_pause(tup->rx_dma_chan);
763 dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
764 dmaengine_terminate_all(tup->rx_dma_chan);
765
766 tegra_uart_rx_buffer_push(tup, state.residue);
767 tup->rx_dma_active = false;
768 }
769
static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
771 {
772 /* Deactivate flow control to stop sender */
773 if (tup->rts_active)
774 set_rts(tup, false);
775
776 tegra_uart_terminate_rx_dma(tup);
777
778 if (tup->rts_active)
779 set_rts(tup, true);
780 }
781
static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
783 {
784 unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;
785
786 if (tup->rx_dma_active)
787 return 0;
788
789 tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
790 tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
791 DMA_PREP_INTERRUPT);
792 if (!tup->rx_dma_desc) {
793 dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
794 return -EIO;
795 }
796
797 tup->rx_dma_active = true;
798 tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
799 tup->rx_dma_desc->callback_param = tup;
800 tup->rx_bytes_requested = count;
801 tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
802 dma_async_issue_pending(tup->rx_dma_chan);
803 return 0;
804 }
805
static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
807 {
808 struct tegra_uart_port *tup = to_tegra_uport(u);
809 unsigned long msr;
810
811 msr = tegra_uart_read(tup, UART_MSR);
812 if (!(msr & UART_MSR_ANY_DELTA))
813 return;
814
815 if (msr & UART_MSR_TERI)
816 tup->uport.icount.rng++;
817 if (msr & UART_MSR_DDSR)
818 tup->uport.icount.dsr++;
/* We may only get DDCD during HW init and reset */
820 if (msr & UART_MSR_DDCD)
821 uart_handle_dcd_change(&tup->uport, msr & UART_MSR_DCD);
822 /* Will start/stop_tx accordingly */
823 if (msr & UART_MSR_DCTS)
824 uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
825 }
826
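/*
 * Main interrupt handler: service every pending cause reported by IIR. In
 * DMA mode the RX causes only mask the RX interrupts here; the DMA
 * transfer is completed or (re)started once IIR reports no pending
 * interrupt, just before returning.
 */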
static irqreturn_t tegra_uart_isr(int irq, void *data)
828 {
829 struct tegra_uart_port *tup = data;
830 struct uart_port *u = &tup->uport;
831 unsigned long iir;
832 unsigned long ier;
833 bool is_rx_start = false;
834 bool is_rx_int = false;
835 unsigned long flags;
836
837 spin_lock_irqsave(&u->lock, flags);
838 while (1) {
839 iir = tegra_uart_read(tup, UART_IIR);
840 if (iir & UART_IIR_NO_INT) {
841 if (!tup->use_rx_pio && is_rx_int) {
842 tegra_uart_handle_rx_dma(tup);
843 if (tup->rx_in_progress) {
844 ier = tup->ier_shadow;
845 ier |= (UART_IER_RLSI | UART_IER_RTOIE |
846 TEGRA_UART_IER_EORD | UART_IER_RDI);
847 tup->ier_shadow = ier;
848 tegra_uart_write(tup, ier, UART_IER);
849 }
850 } else if (is_rx_start) {
851 tegra_uart_start_rx_dma(tup);
852 }
853 spin_unlock_irqrestore(&u->lock, flags);
854 return IRQ_HANDLED;
855 }
856
857 switch ((iir >> 1) & 0x7) {
858 case 0: /* Modem signal change interrupt */
859 tegra_uart_handle_modem_signal_change(u);
860 break;
861
862 case 1: /* Transmit interrupt only triggered when using PIO */
863 tup->ier_shadow &= ~UART_IER_THRI;
864 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
865 tegra_uart_handle_tx_pio(tup);
866 break;
867
868 case 4: /* End of data */
869 case 6: /* Rx timeout */
870 if (!tup->use_rx_pio) {
871 is_rx_int = tup->rx_in_progress;
872 /* Disable Rx interrupts */
873 ier = tup->ier_shadow;
874 ier &= ~(UART_IER_RDI | UART_IER_RLSI |
875 UART_IER_RTOIE | TEGRA_UART_IER_EORD);
876 tup->ier_shadow = ier;
877 tegra_uart_write(tup, ier, UART_IER);
878 break;
879 }
880 fallthrough;
881 case 2: /* Receive */
882 if (!tup->use_rx_pio) {
883 is_rx_start = tup->rx_in_progress;
884 tup->ier_shadow &= ~UART_IER_RDI;
885 tegra_uart_write(tup, tup->ier_shadow,
886 UART_IER);
887 } else {
888 do_handle_rx_pio(tup);
889 }
890 break;
891
892 case 3: /* Receive error */
893 tegra_uart_decode_rx_error(tup,
894 tegra_uart_read(tup, UART_LSR));
895 break;
896
case 5: /* break - nothing to handle */
case 7: /* break - nothing to handle */
899 break;
900 }
901 }
902 }
903
static void tegra_uart_stop_rx(struct uart_port *u)
905 {
906 struct tegra_uart_port *tup = to_tegra_uport(u);
907 struct tty_port *port = &tup->uport.state->port;
908 unsigned long ier;
909
910 if (tup->rts_active)
911 set_rts(tup, false);
912
913 if (!tup->rx_in_progress)
914 return;
915
916 tegra_uart_wait_sym_time(tup, 1); /* wait one character interval */
917
918 ier = tup->ier_shadow;
919 ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
920 TEGRA_UART_IER_EORD);
921 tup->ier_shadow = ier;
922 tegra_uart_write(tup, ier, UART_IER);
923 tup->rx_in_progress = 0;
924
925 if (!tup->use_rx_pio)
926 tegra_uart_terminate_rx_dma(tup);
927 else
928 tegra_uart_handle_rx_pio(tup, port);
929 }
930
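/*
 * Shut the controller down: wait (bounded by the time needed to drain the
 * FIFO at the current baud rate) for the transmitter to go idle, reset
 * both FIFOs, release the DMA channels and gate the clock.
 */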
static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
932 {
933 unsigned long flags;
934 unsigned long char_time = DIV_ROUND_UP(10000000, tup->current_baud);
935 unsigned long fifo_empty_time = tup->uport.fifosize * char_time;
936 unsigned long wait_time;
937 unsigned long lsr;
938 unsigned long msr;
939 unsigned long mcr;
940
941 /* Disable interrupts */
942 tegra_uart_write(tup, 0, UART_IER);
943
944 lsr = tegra_uart_read(tup, UART_LSR);
945 if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
946 msr = tegra_uart_read(tup, UART_MSR);
947 mcr = tegra_uart_read(tup, UART_MCR);
948 if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS))
949 dev_err(tup->uport.dev,
950 "Tx Fifo not empty, CTS disabled, waiting\n");
951
952 /* Wait for Tx fifo to be empty */
953 while ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
954 wait_time = min(fifo_empty_time, 100lu);
955 udelay(wait_time);
956 fifo_empty_time -= wait_time;
957 if (!fifo_empty_time) {
958 msr = tegra_uart_read(tup, UART_MSR);
959 mcr = tegra_uart_read(tup, UART_MCR);
960 if ((mcr & TEGRA_UART_MCR_CTS_EN) &&
961 (msr & UART_MSR_CTS))
962 dev_err(tup->uport.dev,
963 "Slave not ready\n");
964 break;
965 }
966 lsr = tegra_uart_read(tup, UART_LSR);
967 }
968 }
969
970 spin_lock_irqsave(&tup->uport.lock, flags);
971 /* Reset the Rx and Tx FIFOs */
972 tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
973 tup->current_baud = 0;
974 spin_unlock_irqrestore(&tup->uport.lock, flags);
975
976 tup->rx_in_progress = 0;
977 tup->tx_in_progress = 0;
978
979 if (!tup->use_rx_pio)
980 tegra_uart_dma_channel_free(tup, true);
981 if (!tup->use_tx_pio)
982 tegra_uart_dma_channel_free(tup, false);
983
984 clk_disable_unprepare(tup->uart_clk);
985 }
986
static int tegra_uart_hw_init(struct tegra_uart_port *tup)
988 {
989 int ret;
990
991 tup->fcr_shadow = 0;
992 tup->mcr_shadow = 0;
993 tup->lcr_shadow = 0;
994 tup->ier_shadow = 0;
995 tup->current_baud = 0;
996
997 ret = clk_prepare_enable(tup->uart_clk);
998 if (ret) {
999 dev_err(tup->uport.dev, "could not enable clk\n");
1000 return ret;
1001 }
1002
1003 /* Reset the UART controller to clear all previous status.*/
1004 reset_control_assert(tup->rst);
1005 udelay(10);
1006 reset_control_deassert(tup->rst);
1007
1008 tup->rx_in_progress = 0;
1009 tup->tx_in_progress = 0;
1010
/*
 * Set the trigger level
 *
 * For PIO mode:
 *
 * For receive, this will interrupt the CPU after the given number of
 * bytes has been received; any remaining bytes are signalled by the
 * receive timeout interrupt. The Rx high watermark is set to 4.
 *
 * For transmit, if the transmit interrupt is enabled, this will
 * interrupt the CPU when the number of entries in the FIFO drops to the
 * low watermark. The Tx low watermark is set to 16 bytes.
 *
 * For DMA mode:
 *
 * Set the Tx trigger to 16. This should match the DMA burst size
 * programmed in the DMA registers.
 */
1029 tup->fcr_shadow = UART_FCR_ENABLE_FIFO;
1030
1031 if (tup->use_rx_pio) {
1032 tup->fcr_shadow |= UART_FCR_R_TRIG_11;
1033 } else {
1034 if (tup->cdata->max_dma_burst_bytes == 8)
1035 tup->fcr_shadow |= UART_FCR_R_TRIG_10;
1036 else
1037 tup->fcr_shadow |= UART_FCR_R_TRIG_01;
1038 }
1039
1040 tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B;
1041 tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
1042
1043 /* Dummy read to ensure the write is posted */
1044 tegra_uart_read(tup, UART_SCR);
1045
1046 if (tup->cdata->fifo_mode_enable_status) {
1047 ret = tegra_uart_wait_fifo_mode_enabled(tup);
1048 if (ret < 0) {
1049 dev_err(tup->uport.dev,
1050 "Failed to enable FIFO mode: %d\n", ret);
1051 return ret;
1052 }
1053 } else {
1054 /*
1055 * For all tegra devices (up to t210), there is a hardware
1056 * issue that requires software to wait for 3 UART clock
1057 * periods after enabling the TX fifo, otherwise data could
1058 * be lost.
1059 */
1060 tegra_uart_wait_cycle_time(tup, 3);
1061 }
1062
1063 /*
1064 * Initialize the UART with default configuration
1065 * (115200, N, 8, 1) so that the receive DMA buffer may be
1066 * enqueued
1067 */
1068 ret = tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
1069 if (ret < 0) {
1070 dev_err(tup->uport.dev, "Failed to set baud rate\n");
1071 return ret;
1072 }
1073 if (!tup->use_rx_pio) {
1074 tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
1075 tup->fcr_shadow |= UART_FCR_DMA_SELECT;
1076 tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
1077 } else {
1078 tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
1079 }
1080 tup->rx_in_progress = 1;
1081
/*
 * Enable IE_RXS for the receive status interrupts like line errors.
 * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
 *
 * EORD is a different interrupt from RX_TIMEOUT - RX_TIMEOUT occurs when
 * data is sitting in the FIFO and couldn't be transferred to the DMA
 * because the DMA size alignment (4 bytes) is not met. EORD is triggered
 * when there is a pause in the incoming data stream for 4 character
 * times.
 *
 * For pauses in the data which are not aligned to 4 bytes, we get both
 * EORD and RX_TIMEOUT - SW sees RX_TIMEOUT first, then EORD.
 */
1096 tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;
1097
1098 /*
1099 * If using DMA mode, enable EORD interrupt to notify about RX
1100 * completion.
1101 */
1102 if (!tup->use_rx_pio)
1103 tup->ier_shadow |= TEGRA_UART_IER_EORD;
1104
1105 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
1106 return 0;
1107 }
1108
static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
bool dma_to_memory)
1111 {
1112 if (dma_to_memory) {
1113 dmaengine_terminate_all(tup->rx_dma_chan);
1114 dma_release_channel(tup->rx_dma_chan);
1115 dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
1116 tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
1117 tup->rx_dma_chan = NULL;
1118 tup->rx_dma_buf_phys = 0;
1119 tup->rx_dma_buf_virt = NULL;
1120 } else {
1121 dmaengine_terminate_all(tup->tx_dma_chan);
1122 dma_release_channel(tup->tx_dma_chan);
1123 dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
1124 UART_XMIT_SIZE, DMA_TO_DEVICE);
1125 tup->tx_dma_chan = NULL;
1126 tup->tx_dma_buf_phys = 0;
1127 tup->tx_dma_buf_virt = NULL;
1128 }
1129 }
1130
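/*
 * Request the "rx" or "tx" DMA channel and set up its buffer: RX uses a
 * 4 KiB coherent bounce buffer, TX maps the port's circular transmit
 * buffer for streaming DMA. The slave configuration points the channel at
 * the UART data register.
 */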
static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
bool dma_to_memory)
1133 {
1134 struct dma_chan *dma_chan;
1135 unsigned char *dma_buf;
1136 dma_addr_t dma_phys;
1137 int ret;
1138 struct dma_slave_config dma_sconfig;
1139
1140 dma_chan = dma_request_chan(tup->uport.dev, dma_to_memory ? "rx" : "tx");
1141 if (IS_ERR(dma_chan)) {
1142 ret = PTR_ERR(dma_chan);
1143 dev_err(tup->uport.dev,
1144 "DMA channel alloc failed: %d\n", ret);
1145 return ret;
1146 }
1147
1148 if (dma_to_memory) {
1149 dma_buf = dma_alloc_coherent(tup->uport.dev,
1150 TEGRA_UART_RX_DMA_BUFFER_SIZE,
1151 &dma_phys, GFP_KERNEL);
1152 if (!dma_buf) {
1153 dev_err(tup->uport.dev,
1154 "Not able to allocate the dma buffer\n");
1155 dma_release_channel(dma_chan);
1156 return -ENOMEM;
1157 }
1158 dma_sync_single_for_device(tup->uport.dev, dma_phys,
1159 TEGRA_UART_RX_DMA_BUFFER_SIZE,
1160 DMA_TO_DEVICE);
1161 dma_sconfig.src_addr = tup->uport.mapbase;
1162 dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1163 dma_sconfig.src_maxburst = tup->cdata->max_dma_burst_bytes;
1164 tup->rx_dma_chan = dma_chan;
1165 tup->rx_dma_buf_virt = dma_buf;
1166 tup->rx_dma_buf_phys = dma_phys;
1167 } else {
1168 dma_phys = dma_map_single(tup->uport.dev,
1169 tup->uport.state->xmit.buf, UART_XMIT_SIZE,
1170 DMA_TO_DEVICE);
1171 if (dma_mapping_error(tup->uport.dev, dma_phys)) {
1172 dev_err(tup->uport.dev, "dma_map_single tx failed\n");
1173 dma_release_channel(dma_chan);
1174 return -ENOMEM;
1175 }
1176 dma_buf = tup->uport.state->xmit.buf;
1177 dma_sconfig.dst_addr = tup->uport.mapbase;
1178 dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1179 dma_sconfig.dst_maxburst = 16;
1180 tup->tx_dma_chan = dma_chan;
1181 tup->tx_dma_buf_virt = dma_buf;
1182 tup->tx_dma_buf_phys = dma_phys;
1183 }
1184
1185 ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
1186 if (ret < 0) {
1187 dev_err(tup->uport.dev,
1188 "Dma slave config failed, err = %d\n", ret);
1189 tegra_uart_dma_channel_free(tup, dma_to_memory);
1190 return ret;
1191 }
1192
1193 return 0;
1194 }
1195
static int tegra_uart_startup(struct uart_port *u)
1197 {
1198 struct tegra_uart_port *tup = to_tegra_uport(u);
1199 int ret;
1200
1201 if (!tup->use_tx_pio) {
1202 ret = tegra_uart_dma_channel_allocate(tup, false);
1203 if (ret < 0) {
1204 dev_err(u->dev, "Tx Dma allocation failed, err = %d\n",
1205 ret);
1206 return ret;
1207 }
1208 }
1209
1210 if (!tup->use_rx_pio) {
1211 ret = tegra_uart_dma_channel_allocate(tup, true);
1212 if (ret < 0) {
1213 dev_err(u->dev, "Rx Dma allocation failed, err = %d\n",
1214 ret);
1215 goto fail_rx_dma;
1216 }
1217 }
1218
1219 ret = tegra_uart_hw_init(tup);
1220 if (ret < 0) {
1221 dev_err(u->dev, "Uart HW init failed, err = %d\n", ret);
1222 goto fail_hw_init;
1223 }
1224
1225 ret = request_irq(u->irq, tegra_uart_isr, 0,
1226 dev_name(u->dev), tup);
1227 if (ret < 0) {
1228 dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
1229 goto fail_hw_init;
1230 }
1231 return 0;
1232
1233 fail_hw_init:
1234 if (!tup->use_rx_pio)
1235 tegra_uart_dma_channel_free(tup, true);
1236 fail_rx_dma:
1237 if (!tup->use_tx_pio)
1238 tegra_uart_dma_channel_free(tup, false);
1239 return ret;
1240 }
1241
1242 /*
1243 * Flush any TX data submitted for DMA and PIO. Called when the
1244 * TX circular buffer is reset.
1245 */
static void tegra_uart_flush_buffer(struct uart_port *u)
1247 {
1248 struct tegra_uart_port *tup = to_tegra_uport(u);
1249
1250 tup->tx_bytes = 0;
1251 if (tup->tx_dma_chan)
1252 dmaengine_terminate_all(tup->tx_dma_chan);
1253 }
1254
static void tegra_uart_shutdown(struct uart_port *u)
1256 {
1257 struct tegra_uart_port *tup = to_tegra_uport(u);
1258
1259 tegra_uart_hw_deinit(tup);
1260 free_irq(u->irq, tup);
1261 }
1262
static void tegra_uart_enable_ms(struct uart_port *u)
1264 {
1265 struct tegra_uart_port *tup = to_tegra_uport(u);
1266
1267 if (tup->enable_modem_interrupt) {
1268 tup->ier_shadow |= UART_IER_MSI;
1269 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
1270 }
1271 }
1272
static void tegra_uart_set_termios(struct uart_port *u,
struct ktermios *termios, struct ktermios *oldtermios)
1275 {
1276 struct tegra_uart_port *tup = to_tegra_uport(u);
1277 unsigned int baud;
1278 unsigned long flags;
1279 unsigned int lcr;
1280 int symb_bit = 1;
1281 struct clk *parent_clk = clk_get_parent(tup->uart_clk);
1282 unsigned long parent_clk_rate = clk_get_rate(parent_clk);
1283 int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;
1284 int ret;
1285
1286 max_divider *= 16;
1287 spin_lock_irqsave(&u->lock, flags);
1288
1289 /* Changing configuration, it is safe to stop any rx now */
1290 if (tup->rts_active)
1291 set_rts(tup, false);
1292
1293 /* Clear all interrupts as configuration is going to be changed */
1294 tegra_uart_write(tup, tup->ier_shadow | UART_IER_RDI, UART_IER);
1295 tegra_uart_read(tup, UART_IER);
1296 tegra_uart_write(tup, 0, UART_IER);
1297 tegra_uart_read(tup, UART_IER);
1298
1299 /* Parity */
1300 lcr = tup->lcr_shadow;
1301 lcr &= ~UART_LCR_PARITY;
1302
1303 /* CMSPAR isn't supported by this driver */
1304 termios->c_cflag &= ~CMSPAR;
1305
1306 if ((termios->c_cflag & PARENB) == PARENB) {
1307 symb_bit++;
1308 if (termios->c_cflag & PARODD) {
1309 lcr |= UART_LCR_PARITY;
1310 lcr &= ~UART_LCR_EPAR;
1311 lcr &= ~UART_LCR_SPAR;
1312 } else {
1313 lcr |= UART_LCR_PARITY;
1314 lcr |= UART_LCR_EPAR;
1315 lcr &= ~UART_LCR_SPAR;
1316 }
1317 }
1318
1319 lcr &= ~UART_LCR_WLEN8;
1320 switch (termios->c_cflag & CSIZE) {
1321 case CS5:
1322 lcr |= UART_LCR_WLEN5;
1323 symb_bit += 5;
1324 break;
1325 case CS6:
1326 lcr |= UART_LCR_WLEN6;
1327 symb_bit += 6;
1328 break;
1329 case CS7:
1330 lcr |= UART_LCR_WLEN7;
1331 symb_bit += 7;
1332 break;
1333 default:
1334 lcr |= UART_LCR_WLEN8;
1335 symb_bit += 8;
1336 break;
1337 }
1338
1339 /* Stop bits */
1340 if (termios->c_cflag & CSTOPB) {
1341 lcr |= UART_LCR_STOP;
1342 symb_bit += 2;
1343 } else {
1344 lcr &= ~UART_LCR_STOP;
1345 symb_bit++;
1346 }
1347
1348 tegra_uart_write(tup, lcr, UART_LCR);
1349 tup->lcr_shadow = lcr;
1350 tup->symb_bit = symb_bit;
1351
1352 /* Baud rate. */
1353 baud = uart_get_baud_rate(u, termios, oldtermios,
1354 parent_clk_rate/max_divider,
1355 parent_clk_rate/16);
1356 spin_unlock_irqrestore(&u->lock, flags);
1357 ret = tegra_set_baudrate(tup, baud);
1358 if (ret < 0) {
1359 dev_err(tup->uport.dev, "Failed to set baud rate\n");
1360 return;
1361 }
1362 if (tty_termios_baud_rate(termios))
1363 tty_termios_encode_baud_rate(termios, baud, baud);
1364 spin_lock_irqsave(&u->lock, flags);
1365
1366 /* Flow control */
1367 if (termios->c_cflag & CRTSCTS) {
1368 tup->mcr_shadow |= TEGRA_UART_MCR_CTS_EN;
1369 tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
1370 tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
1371 /* if top layer has asked to set rts active then do so here */
1372 if (tup->rts_active)
1373 set_rts(tup, true);
1374 } else {
1375 tup->mcr_shadow &= ~TEGRA_UART_MCR_CTS_EN;
1376 tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
1377 tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
1378 }
1379
1380 /* update the port timeout based on new settings */
1381 uart_update_timeout(u, termios->c_cflag, baud);
1382
1383 /* Make sure all writes have completed */
1384 tegra_uart_read(tup, UART_IER);
1385
1386 /* Re-enable interrupt */
1387 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
1388 tegra_uart_read(tup, UART_IER);
1389
1390 tup->uport.ignore_status_mask = 0;
1391 /* Ignore all characters if CREAD is not set */
1392 if ((termios->c_cflag & CREAD) == 0)
1393 tup->uport.ignore_status_mask |= UART_LSR_DR;
1394 if (termios->c_iflag & IGNBRK)
1395 tup->uport.ignore_status_mask |= UART_LSR_BI;
1396
1397 spin_unlock_irqrestore(&u->lock, flags);
1398 }
1399
static const char *tegra_uart_type(struct uart_port *u)
1401 {
1402 return TEGRA_UART_TYPE;
1403 }
1404
1405 static const struct uart_ops tegra_uart_ops = {
1406 .tx_empty = tegra_uart_tx_empty,
1407 .set_mctrl = tegra_uart_set_mctrl,
1408 .get_mctrl = tegra_uart_get_mctrl,
1409 .stop_tx = tegra_uart_stop_tx,
1410 .start_tx = tegra_uart_start_tx,
1411 .stop_rx = tegra_uart_stop_rx,
1412 .flush_buffer = tegra_uart_flush_buffer,
1413 .enable_ms = tegra_uart_enable_ms,
1414 .break_ctl = tegra_uart_break_ctl,
1415 .startup = tegra_uart_startup,
1416 .shutdown = tegra_uart_shutdown,
1417 .set_termios = tegra_uart_set_termios,
1418 .type = tegra_uart_type,
1419 .request_port = tegra_uart_request_port,
1420 .release_port = tegra_uart_release_port,
1421 };
1422
1423 static struct uart_driver tegra_uart_driver = {
1424 .owner = THIS_MODULE,
1425 .driver_name = "tegra_hsuart",
1426 .dev_name = "ttyTHS",
1427 .cons = NULL,
1428 .nr = TEGRA_UART_MAXIMUM,
1429 };
1430
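/*
 * Parse the device tree node: the "serial" alias gives the port number,
 * "nvidia,enable-modem-interrupt" enables modem signal interrupts, missing
 * "rx"/"tx" entries in "dma-names" select PIO for that direction, and
 * "nvidia,adjust-baud-rates" supplies optional <low high tolerance>
 * triplets for baud rate adjustment.
 */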
static int tegra_uart_parse_dt(struct platform_device *pdev,
struct tegra_uart_port *tup)
1433 {
1434 struct device_node *np = pdev->dev.of_node;
1435 int port;
1436 int ret;
1437 int index;
1438 u32 pval;
1439 int count;
1440 int n_entries;
1441
1442 port = of_alias_get_id(np, "serial");
1443 if (port < 0) {
1444 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port);
1445 return port;
1446 }
1447 tup->uport.line = port;
1448
1449 tup->enable_modem_interrupt = of_property_read_bool(np,
1450 "nvidia,enable-modem-interrupt");
1451
1452 index = of_property_match_string(np, "dma-names", "rx");
1453 if (index < 0) {
1454 tup->use_rx_pio = true;
1455 dev_info(&pdev->dev, "RX in PIO mode\n");
1456 }
1457 index = of_property_match_string(np, "dma-names", "tx");
1458 if (index < 0) {
1459 tup->use_tx_pio = true;
1460 dev_info(&pdev->dev, "TX in PIO mode\n");
1461 }
1462
1463 n_entries = of_property_count_u32_elems(np, "nvidia,adjust-baud-rates");
1464 if (n_entries > 0) {
1465 tup->n_adjustable_baud_rates = n_entries / 3;
1466 tup->baud_tolerance =
1467 devm_kzalloc(&pdev->dev, (tup->n_adjustable_baud_rates) *
1468 sizeof(*tup->baud_tolerance), GFP_KERNEL);
1469 if (!tup->baud_tolerance)
1470 return -ENOMEM;
1471 for (count = 0, index = 0; count < n_entries; count += 3,
1472 index++) {
1473 ret =
1474 of_property_read_u32_index(np,
1475 "nvidia,adjust-baud-rates",
1476 count, &pval);
1477 if (!ret)
1478 tup->baud_tolerance[index].lower_range_baud =
1479 pval;
1480 ret =
1481 of_property_read_u32_index(np,
1482 "nvidia,adjust-baud-rates",
1483 count + 1, &pval);
1484 if (!ret)
1485 tup->baud_tolerance[index].upper_range_baud =
1486 pval;
1487 ret =
1488 of_property_read_u32_index(np,
1489 "nvidia,adjust-baud-rates",
1490 count + 2, &pval);
1491 if (!ret)
1492 tup->baud_tolerance[index].tolerance =
1493 (s32)pval;
1494 }
1495 } else {
1496 tup->n_adjustable_baud_rates = 0;
1497 }
1498
1499 return 0;
1500 }
1501
1502 static struct tegra_uart_chip_data tegra20_uart_chip_data = {
1503 .tx_fifo_full_status = false,
1504 .allow_txfifo_reset_fifo_mode = true,
1505 .support_clk_src_div = false,
1506 .fifo_mode_enable_status = false,
1507 .uart_max_port = 5,
1508 .max_dma_burst_bytes = 4,
1509 .error_tolerance_low_range = -4,
1510 .error_tolerance_high_range = 4,
1511 };
1512
1513 static struct tegra_uart_chip_data tegra30_uart_chip_data = {
1514 .tx_fifo_full_status = true,
1515 .allow_txfifo_reset_fifo_mode = false,
1516 .support_clk_src_div = true,
1517 .fifo_mode_enable_status = false,
1518 .uart_max_port = 5,
1519 .max_dma_burst_bytes = 4,
1520 .error_tolerance_low_range = -4,
1521 .error_tolerance_high_range = 4,
1522 };
1523
1524 static struct tegra_uart_chip_data tegra186_uart_chip_data = {
1525 .tx_fifo_full_status = true,
1526 .allow_txfifo_reset_fifo_mode = false,
1527 .support_clk_src_div = true,
1528 .fifo_mode_enable_status = true,
1529 .uart_max_port = 8,
1530 .max_dma_burst_bytes = 8,
1531 .error_tolerance_low_range = 0,
1532 .error_tolerance_high_range = 4,
1533 };
1534
1535 static struct tegra_uart_chip_data tegra194_uart_chip_data = {
1536 .tx_fifo_full_status = true,
1537 .allow_txfifo_reset_fifo_mode = false,
1538 .support_clk_src_div = true,
1539 .fifo_mode_enable_status = true,
1540 .uart_max_port = 8,
1541 .max_dma_burst_bytes = 8,
1542 .error_tolerance_low_range = -2,
1543 .error_tolerance_high_range = 2,
1544 };
1545
1546 static const struct of_device_id tegra_uart_of_match[] = {
1547 {
1548 .compatible = "nvidia,tegra30-hsuart",
1549 .data = &tegra30_uart_chip_data,
1550 }, {
1551 .compatible = "nvidia,tegra20-hsuart",
1552 .data = &tegra20_uart_chip_data,
1553 }, {
1554 .compatible = "nvidia,tegra186-hsuart",
1555 .data = &tegra186_uart_chip_data,
1556 }, {
1557 .compatible = "nvidia,tegra194-hsuart",
1558 .data = &tegra194_uart_chip_data,
1559 }, {
1560 },
1561 };
1562 MODULE_DEVICE_TABLE(of, tegra_uart_of_match);
1563
static int tegra_uart_probe(struct platform_device *pdev)
1565 {
1566 struct tegra_uart_port *tup;
1567 struct uart_port *u;
1568 struct resource *resource;
1569 int ret;
1570 const struct tegra_uart_chip_data *cdata;
1571 const struct of_device_id *match;
1572
1573 match = of_match_device(tegra_uart_of_match, &pdev->dev);
1574 if (!match) {
1575 dev_err(&pdev->dev, "Error: No device match found\n");
1576 return -ENODEV;
1577 }
1578 cdata = match->data;
1579
1580 tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL);
1581 if (!tup) {
1582 dev_err(&pdev->dev, "Failed to allocate memory for tup\n");
1583 return -ENOMEM;
1584 }
1585
1586 ret = tegra_uart_parse_dt(pdev, tup);
1587 if (ret < 0)
1588 return ret;
1589
1590 u = &tup->uport;
1591 u->dev = &pdev->dev;
1592 u->ops = &tegra_uart_ops;
1593 u->type = PORT_TEGRA;
1594 u->fifosize = 32;
1595 tup->cdata = cdata;
1596
1597 platform_set_drvdata(pdev, tup);
1598 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1599 if (!resource) {
1600 dev_err(&pdev->dev, "No IO memory resource\n");
1601 return -ENODEV;
1602 }
1603
1604 u->mapbase = resource->start;
1605 u->membase = devm_ioremap_resource(&pdev->dev, resource);
1606 if (IS_ERR(u->membase))
1607 return PTR_ERR(u->membase);
1608
1609 tup->uart_clk = devm_clk_get(&pdev->dev, NULL);
1610 if (IS_ERR(tup->uart_clk)) {
1611 dev_err(&pdev->dev, "Couldn't get the clock\n");
1612 return PTR_ERR(tup->uart_clk);
1613 }
1614
1615 tup->rst = devm_reset_control_get_exclusive(&pdev->dev, "serial");
1616 if (IS_ERR(tup->rst)) {
1617 dev_err(&pdev->dev, "Couldn't get the reset\n");
1618 return PTR_ERR(tup->rst);
1619 }
1620
1621 u->iotype = UPIO_MEM32;
1622 ret = platform_get_irq(pdev, 0);
1623 if (ret < 0)
1624 return ret;
1625 u->irq = ret;
1626 u->regshift = 2;
1627 ret = uart_add_one_port(&tegra_uart_driver, u);
1628 if (ret < 0) {
1629 dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret);
1630 return ret;
1631 }
1632 return ret;
1633 }
1634
static int tegra_uart_remove(struct platform_device *pdev)
1636 {
1637 struct tegra_uart_port *tup = platform_get_drvdata(pdev);
1638 struct uart_port *u = &tup->uport;
1639
1640 uart_remove_one_port(&tegra_uart_driver, u);
1641 return 0;
1642 }
1643
1644 #ifdef CONFIG_PM_SLEEP
static int tegra_uart_suspend(struct device *dev)
1646 {
1647 struct tegra_uart_port *tup = dev_get_drvdata(dev);
1648 struct uart_port *u = &tup->uport;
1649
1650 return uart_suspend_port(&tegra_uart_driver, u);
1651 }
1652
static int tegra_uart_resume(struct device *dev)
1654 {
1655 struct tegra_uart_port *tup = dev_get_drvdata(dev);
1656 struct uart_port *u = &tup->uport;
1657
1658 return uart_resume_port(&tegra_uart_driver, u);
1659 }
1660 #endif
1661
1662 static const struct dev_pm_ops tegra_uart_pm_ops = {
1663 SET_SYSTEM_SLEEP_PM_OPS(tegra_uart_suspend, tegra_uart_resume)
1664 };
1665
1666 static struct platform_driver tegra_uart_platform_driver = {
1667 .probe = tegra_uart_probe,
1668 .remove = tegra_uart_remove,
1669 .driver = {
1670 .name = "serial-tegra",
1671 .of_match_table = tegra_uart_of_match,
1672 .pm = &tegra_uart_pm_ops,
1673 },
1674 };
1675
static int __init tegra_uart_init(void)
1677 {
1678 int ret;
1679 struct device_node *node;
1680 const struct of_device_id *match = NULL;
1681 const struct tegra_uart_chip_data *cdata = NULL;
1682
1683 node = of_find_matching_node(NULL, tegra_uart_of_match);
1684 if (node)
1685 match = of_match_node(tegra_uart_of_match, node);
1686 if (match)
1687 cdata = match->data;
1688 if (cdata)
1689 tegra_uart_driver.nr = cdata->uart_max_port;
1690
1691 ret = uart_register_driver(&tegra_uart_driver);
1692 if (ret < 0) {
1693 pr_err("Could not register %s driver\n",
1694 tegra_uart_driver.driver_name);
1695 return ret;
1696 }
1697
1698 ret = platform_driver_register(&tegra_uart_platform_driver);
1699 if (ret < 0) {
1700 pr_err("Uart platform driver register failed, e = %d\n", ret);
1701 uart_unregister_driver(&tegra_uart_driver);
1702 return ret;
1703 }
1704 return 0;
1705 }
1706
static void __exit tegra_uart_exit(void)
1708 {
1709 pr_info("Unloading tegra uart driver\n");
1710 platform_driver_unregister(&tegra_uart_platform_driver);
1711 uart_unregister_driver(&tegra_uart_driver);
1712 }
1713
1714 module_init(tegra_uart_init);
1715 module_exit(tegra_uart_exit);
1716
1717 MODULE_ALIAS("platform:serial-tegra");
1718 MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
1719 MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
1720 MODULE_LICENSE("GPL v2");
1721