1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Driver for Atmel AT91 Serial ports
4 * Copyright (C) 2003 Rick Bronson
5 *
6 * Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
7 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
8 *
9 * DMA support added by Chip Coldwell.
10 */
11 #include <linux/tty.h>
12 #include <linux/ioport.h>
13 #include <linux/slab.h>
14 #include <linux/init.h>
15 #include <linux/serial.h>
16 #include <linux/clk.h>
17 #include <linux/console.h>
18 #include <linux/sysrq.h>
19 #include <linux/tty_flip.h>
20 #include <linux/platform_device.h>
21 #include <linux/of.h>
22 #include <linux/of_device.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/dmaengine.h>
25 #include <linux/atmel_pdc.h>
26 #include <linux/uaccess.h>
27 #include <linux/platform_data/atmel.h>
28 #include <linux/timer.h>
29 #include <linux/err.h>
30 #include <linux/irq.h>
31 #include <linux/suspend.h>
32 #include <linux/mm.h>
33
34 #include <asm/div64.h>
35 #include <asm/io.h>
36 #include <asm/ioctls.h>
37
38 #define PDC_BUFFER_SIZE 512
39 /* Revisit: We should calculate this based on the actual port settings */
40 #define PDC_RX_TIMEOUT (3 * 10) /* 3 bytes */
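/*
 * The receiver timeout counts in bit periods; assuming 8N1 framing (start bit,
 * 8 data bits, stop bit = 10 bits per character), 3 * 10 bit periods is
 * roughly three character times of line idle.
 */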
41
42 /* The minimum number of data the FIFOs should be able to contain */
43 #define ATMEL_MIN_FIFO_SIZE 8
44 /*
45 * These two offsets are subtracted from the RX FIFO size to define the RTS
46 * high and low thresholds
47 */
48 #define ATMEL_RTS_HIGH_OFFSET 16
49 #define ATMEL_RTS_LOW_OFFSET 20
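/*
 * For example, a hypothetical 32-byte RX FIFO would give a high threshold of
 * 32 - 16 = 16 and a low threshold of 32 - 20 = 12.
 */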
50
51 #include <linux/serial_core.h>
52
53 #include "serial_mctrl_gpio.h"
54 #include "atmel_serial.h"
55
56 static void atmel_start_rx(struct uart_port *port);
57 static void atmel_stop_rx(struct uart_port *port);
58
59 #ifdef CONFIG_SERIAL_ATMEL_TTYAT
60
61 /* Use device name ttyAT, major 204 and minor 154-169. This is necessary if we
62 * should coexist with the 8250 driver, such as if we have an external 16C550
63 * UART. */
64 #define SERIAL_ATMEL_MAJOR 204
65 #define MINOR_START 154
66 #define ATMEL_DEVICENAME "ttyAT"
67
68 #else
69
70 /* Use device name ttyS, major 4, minor 64-68. This is the usual serial port
71 * name, but it is legally reserved for the 8250 driver. */
72 #define SERIAL_ATMEL_MAJOR TTY_MAJOR
73 #define MINOR_START 64
74 #define ATMEL_DEVICENAME "ttyS"
75
76 #endif
77
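/*
 * Upper bound on the number of CSR/IMR polling iterations performed by one
 * invocation of atmel_interrupt(), so that a stuck interrupt source cannot
 * keep the handler spinning forever.
 */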
78 #define ATMEL_ISR_PASS_LIMIT 256
79
80 struct atmel_dma_buffer {
81 unsigned char *buf;
82 dma_addr_t dma_addr;
83 unsigned int dma_size;
84 unsigned int ofs;
85 };
86
87 struct atmel_uart_char {
88 u16 status;
89 u16 ch;
90 };
91
92 /*
93 * Be careful, the real size of the ring buffer is
94 * sizeof(atmel_uart_char) * ATMEL_SERIAL_RINGSIZE. It means that the ring buffer
95 * can contain up to 1024 characters in PIO mode and up to 4096 characters in
96 * DMA mode.
97 */
98 #define ATMEL_SERIAL_RINGSIZE 1024
99
100 /*
101 * at91: 6 USARTs and one DBGU port (SAM9260)
102 * samx7: 3 USARTs and 5 UARTs
103 */
104 #define ATMEL_MAX_UART 8
105
106 /*
107 * We wrap our port structure around the generic uart_port.
108 */
109 struct atmel_uart_port {
110 struct uart_port uart; /* uart */
111 struct clk *clk; /* uart clock */
112 int may_wakeup; /* cached value of device_may_wakeup for times we need to disable it */
113 u32 backup_imr; /* IMR saved during suspend */
114 int break_active; /* break being received */
115
116 bool use_dma_rx; /* enable DMA receiver */
117 bool use_pdc_rx; /* enable PDC receiver */
118 short pdc_rx_idx; /* current PDC RX buffer */
119 struct atmel_dma_buffer pdc_rx[2]; /* PDC receiver */
120
121 bool use_dma_tx; /* enable DMA transmitter */
122 bool use_pdc_tx; /* enable PDC transmitter */
123 struct atmel_dma_buffer pdc_tx; /* PDC transmitter */
124
125 spinlock_t lock_tx; /* port lock */
126 spinlock_t lock_rx; /* port lock */
127 struct dma_chan *chan_tx;
128 struct dma_chan *chan_rx;
129 struct dma_async_tx_descriptor *desc_tx;
130 struct dma_async_tx_descriptor *desc_rx;
131 dma_cookie_t cookie_tx;
132 dma_cookie_t cookie_rx;
133 struct scatterlist sg_tx;
134 struct scatterlist sg_rx;
135 struct tasklet_struct tasklet_rx;
136 struct tasklet_struct tasklet_tx;
137 atomic_t tasklet_shutdown;
138 unsigned int irq_status_prev;
139 unsigned int tx_len;
140
141 struct circ_buf rx_ring;
142
143 struct mctrl_gpios *gpios;
144 u32 backup_mode; /* MR saved during iso7816 operations */
145 u32 backup_brgr; /* BRGR saved during iso7816 operations */
146 unsigned int tx_done_mask;
147 u32 fifo_size;
148 u32 rts_high;
149 u32 rts_low;
150 bool ms_irq_enabled;
151 u32 rtor; /* address of receiver timeout register if it exists */
152 bool has_frac_baudrate;
153 bool has_hw_timer;
154 struct timer_list uart_timer;
155
156 bool tx_stopped;
157 bool suspended;
158 unsigned int pending;
159 unsigned int pending_status;
160 spinlock_t lock_suspended;
161
162 bool hd_start_rx; /* can start RX during half-duplex operation */
163
164 /* ISO7816 */
165 unsigned int fidi_min;
166 unsigned int fidi_max;
167
168 #ifdef CONFIG_PM
169 struct {
170 u32 cr;
171 u32 mr;
172 u32 imr;
173 u32 brgr;
174 u32 rtor;
175 u32 ttgr;
176 u32 fmr;
177 u32 fimr;
178 } cache;
179 #endif
180
181 int (*prepare_rx)(struct uart_port *port);
182 int (*prepare_tx)(struct uart_port *port);
183 void (*schedule_rx)(struct uart_port *port);
184 void (*schedule_tx)(struct uart_port *port);
185 void (*release_rx)(struct uart_port *port);
186 void (*release_tx)(struct uart_port *port);
187 };
188
189 static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
190 static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
191
192 #if defined(CONFIG_OF)
193 static const struct of_device_id atmel_serial_dt_ids[] = {
194 { .compatible = "atmel,at91rm9200-usart-serial" },
195 { /* sentinel */ }
196 };
197 #endif
198
199 static inline struct atmel_uart_port *
200 to_atmel_uart_port(struct uart_port *uart)
201 {
202 return container_of(uart, struct atmel_uart_port, uart);
203 }
204
205 static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg)
206 {
207 return __raw_readl(port->membase + reg);
208 }
209
210 static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
211 {
212 __raw_writel(value, port->membase + reg);
213 }
214
215 static inline u8 atmel_uart_read_char(struct uart_port *port)
216 {
217 return __raw_readb(port->membase + ATMEL_US_RHR);
218 }
219
220 static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
221 {
222 __raw_writeb(value, port->membase + ATMEL_US_THR);
223 }
224
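/*
 * "Half duplex" here means either RS485 without reception during transmission
 * or ISO7816 mode; in both cases the receiver is kept disabled while the
 * transmitter is active.
 */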
225 static inline int atmel_uart_is_half_duplex(struct uart_port *port)
226 {
227 return ((port->rs485.flags & SER_RS485_ENABLED) &&
228 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
229 (port->iso7816.flags & SER_ISO7816_ENABLED);
230 }
231
232 #ifdef CONFIG_SERIAL_ATMEL_PDC
233 static bool atmel_use_pdc_rx(struct uart_port *port)
234 {
235 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
236
237 return atmel_port->use_pdc_rx;
238 }
239
240 static bool atmel_use_pdc_tx(struct uart_port *port)
241 {
242 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
243
244 return atmel_port->use_pdc_tx;
245 }
246 #else
247 static bool atmel_use_pdc_rx(struct uart_port *port)
248 {
249 return false;
250 }
251
252 static bool atmel_use_pdc_tx(struct uart_port *port)
253 {
254 return false;
255 }
256 #endif
257
258 static bool atmel_use_dma_tx(struct uart_port *port)
259 {
260 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
261
262 return atmel_port->use_dma_tx;
263 }
264
265 static bool atmel_use_dma_rx(struct uart_port *port)
266 {
267 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
268
269 return atmel_port->use_dma_rx;
270 }
271
272 static bool atmel_use_fifo(struct uart_port *port)
273 {
274 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
275
276 return atmel_port->fifo_size;
277 }
278
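/* Do not schedule new tasklet work once tasklet_shutdown has been set. */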
279 static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port,
280 struct tasklet_struct *t)
281 {
282 if (!atomic_read(&atmel_port->tasklet_shutdown))
283 tasklet_schedule(t);
284 }
285
286 /* Enable or disable the rs485 support */
287 static int atmel_config_rs485(struct uart_port *port,
288 struct serial_rs485 *rs485conf)
289 {
290 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
291 unsigned int mode;
292
293 /* Disable interrupts */
294 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
295
296 mode = atmel_uart_readl(port, ATMEL_US_MR);
297
298 if (rs485conf->flags & SER_RS485_ENABLED) {
299 dev_dbg(port->dev, "Setting UART to RS485\n");
300 if (rs485conf->flags & SER_RS485_RX_DURING_TX)
301 atmel_port->tx_done_mask = ATMEL_US_TXRDY;
302 else
303 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
304
305 atmel_uart_writel(port, ATMEL_US_TTGR,
306 rs485conf->delay_rts_after_send);
307 mode &= ~ATMEL_US_USMODE;
308 mode |= ATMEL_US_USMODE_RS485;
309 } else {
310 dev_dbg(port->dev, "Setting UART to RS232\n");
311 if (atmel_use_pdc_tx(port))
312 atmel_port->tx_done_mask = ATMEL_US_ENDTX |
313 ATMEL_US_TXBUFE;
314 else
315 atmel_port->tx_done_mask = ATMEL_US_TXRDY;
316 }
317 atmel_uart_writel(port, ATMEL_US_MR, mode);
318
319 /* Enable interrupts */
320 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
321
322 return 0;
323 }
324
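/*
 * The clock divider is the input clock rate divided by the requested ISO7816
 * clock. For example (hypothetical figures), a 66 MHz peripheral clock and a
 * 3.5712 MHz smart-card clock give cd = 66000000 / 3571200 ~= 18.
 */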
325 static unsigned int atmel_calc_cd(struct uart_port *port,
326 struct serial_iso7816 *iso7816conf)
327 {
328 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
329 unsigned int cd;
330 u64 mck_rate;
331
332 mck_rate = (u64)clk_get_rate(atmel_port->clk);
333 do_div(mck_rate, iso7816conf->clk);
334 cd = mck_rate;
335 return cd;
336 }
337
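/*
 * FI_DI_RATIO is the clock-rate conversion factor Fi divided by the baud-rate
 * adjustment factor Di. The ISO7816-3 default is Fi = 372, Di = 1, i.e. 372
 * (0x174), which is also the value restored when leaving ISO7816 mode below.
 */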
338 static unsigned int atmel_calc_fidi(struct uart_port *port,
339 struct serial_iso7816 *iso7816conf)
340 {
341 u64 fidi = 0;
342
343 if (iso7816conf->sc_fi && iso7816conf->sc_di) {
344 fidi = (u64)iso7816conf->sc_fi;
345 do_div(fidi, iso7816conf->sc_di);
346 }
347 return (u32)fidi;
348 }
349
350 /* Enable or disable the iso7816 support */
351 /* Called with interrupts disabled */
352 static int atmel_config_iso7816(struct uart_port *port,
353 struct serial_iso7816 *iso7816conf)
354 {
355 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
356 unsigned int mode;
357 unsigned int cd, fidi;
358 int ret = 0;
359
360 /* Disable interrupts */
361 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
362
363 mode = atmel_uart_readl(port, ATMEL_US_MR);
364
365 if (iso7816conf->flags & SER_ISO7816_ENABLED) {
366 mode &= ~ATMEL_US_USMODE;
367
368 if (iso7816conf->tg > 255) {
369 dev_err(port->dev, "ISO7816: Timeguard exceeding 255\n");
370 memset(iso7816conf, 0, sizeof(struct serial_iso7816));
371 ret = -EINVAL;
372 goto err_out;
373 }
374
375 if ((iso7816conf->flags & SER_ISO7816_T_PARAM)
376 == SER_ISO7816_T(0)) {
377 mode |= ATMEL_US_USMODE_ISO7816_T0 | ATMEL_US_DSNACK;
378 } else if ((iso7816conf->flags & SER_ISO7816_T_PARAM)
379 == SER_ISO7816_T(1)) {
380 mode |= ATMEL_US_USMODE_ISO7816_T1 | ATMEL_US_INACK;
381 } else {
382 dev_err(port->dev, "ISO7816: Type not supported\n");
383 memset(iso7816conf, 0, sizeof(struct serial_iso7816));
384 ret = -EINVAL;
385 goto err_out;
386 }
387
388 mode &= ~(ATMEL_US_USCLKS | ATMEL_US_NBSTOP | ATMEL_US_PAR);
389
390 /* select mck clock, and output */
391 mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO;
392 /* set parity for normal/inverse mode + max iterations */
393 mode |= ATMEL_US_PAR_EVEN | ATMEL_US_NBSTOP_1 | ATMEL_US_MAX_ITER(3);
394
395 cd = atmel_calc_cd(port, iso7816conf);
396 fidi = atmel_calc_fidi(port, iso7816conf);
397 if (fidi == 0) {
398 dev_warn(port->dev, "ISO7816 fidi = 0, Generator generates no signal\n");
399 } else if (fidi < atmel_port->fidi_min
400 || fidi > atmel_port->fidi_max) {
401 dev_err(port->dev, "ISO7816 fidi = %u, value not supported\n", fidi);
402 memset(iso7816conf, 0, sizeof(struct serial_iso7816));
403 ret = -EINVAL;
404 goto err_out;
405 }
406
407 if (!(port->iso7816.flags & SER_ISO7816_ENABLED)) {
408 /* port not yet in iso7816 mode: store configuration */
409 atmel_port->backup_mode = atmel_uart_readl(port, ATMEL_US_MR);
410 atmel_port->backup_brgr = atmel_uart_readl(port, ATMEL_US_BRGR);
411 }
412
413 atmel_uart_writel(port, ATMEL_US_TTGR, iso7816conf->tg);
414 atmel_uart_writel(port, ATMEL_US_BRGR, cd);
415 atmel_uart_writel(port, ATMEL_US_FIDI, fidi);
416
417 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXEN);
418 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY | ATMEL_US_NACK | ATMEL_US_ITERATION;
419 } else {
420 dev_dbg(port->dev, "Setting UART back to RS232\n");
421 /* back to last RS232 settings */
422 mode = atmel_port->backup_mode;
423 memset(iso7816conf, 0, sizeof(struct serial_iso7816));
424 atmel_uart_writel(port, ATMEL_US_TTGR, 0);
425 atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->backup_brgr);
426 atmel_uart_writel(port, ATMEL_US_FIDI, 0x174);
427
428 if (atmel_use_pdc_tx(port))
429 atmel_port->tx_done_mask = ATMEL_US_ENDTX |
430 ATMEL_US_TXBUFE;
431 else
432 atmel_port->tx_done_mask = ATMEL_US_TXRDY;
433 }
434
435 port->iso7816 = *iso7816conf;
436
437 atmel_uart_writel(port, ATMEL_US_MR, mode);
438
439 err_out:
440 /* Enable interrupts */
441 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
442
443 return ret;
444 }
445
446 /*
447 * Return TIOCSER_TEMT when the transmitter FIFO and shift register are empty.
448 */
449 static u_int atmel_tx_empty(struct uart_port *port)
450 {
451 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
452
453 if (atmel_port->tx_stopped)
454 return TIOCSER_TEMT;
455 return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ?
456 TIOCSER_TEMT :
457 0;
458 }
459
460 /*
461 * Set state of the modem control output lines
462 */
463 static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
464 {
465 unsigned int control = 0;
466 unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR);
467 unsigned int rts_paused, rts_ready;
468 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
469
470 /* override mode to RS485 if needed, otherwise keep the current mode */
471 if (port->rs485.flags & SER_RS485_ENABLED) {
472 atmel_uart_writel(port, ATMEL_US_TTGR,
473 port->rs485.delay_rts_after_send);
474 mode &= ~ATMEL_US_USMODE;
475 mode |= ATMEL_US_USMODE_RS485;
476 }
477
478 /* set the RTS line state according to the mode */
479 if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
480 /* force RTS line to high level */
481 rts_paused = ATMEL_US_RTSEN;
482
483 /* give the control of the RTS line back to the hardware */
484 rts_ready = ATMEL_US_RTSDIS;
485 } else {
486 /* force RTS line to high level */
487 rts_paused = ATMEL_US_RTSDIS;
488
489 /* force RTS line to low level */
490 rts_ready = ATMEL_US_RTSEN;
491 }
492
493 if (mctrl & TIOCM_RTS)
494 control |= rts_ready;
495 else
496 control |= rts_paused;
497
498 if (mctrl & TIOCM_DTR)
499 control |= ATMEL_US_DTREN;
500 else
501 control |= ATMEL_US_DTRDIS;
502
503 atmel_uart_writel(port, ATMEL_US_CR, control);
504
505 mctrl_gpio_set(atmel_port->gpios, mctrl);
506
507 /* Local loopback mode? */
508 mode &= ~ATMEL_US_CHMODE;
509 if (mctrl & TIOCM_LOOP)
510 mode |= ATMEL_US_CHMODE_LOC_LOOP;
511 else
512 mode |= ATMEL_US_CHMODE_NORMAL;
513
514 atmel_uart_writel(port, ATMEL_US_MR, mode);
515 }
516
517 /*
518 * Get state of the modem control input lines
519 */
520 static u_int atmel_get_mctrl(struct uart_port *port)
521 {
522 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
523 unsigned int ret = 0, status;
524
525 status = atmel_uart_readl(port, ATMEL_US_CSR);
526
527 /*
528 * The control signals are active low.
529 */
530 if (!(status & ATMEL_US_DCD))
531 ret |= TIOCM_CD;
532 if (!(status & ATMEL_US_CTS))
533 ret |= TIOCM_CTS;
534 if (!(status & ATMEL_US_DSR))
535 ret |= TIOCM_DSR;
536 if (!(status & ATMEL_US_RI))
537 ret |= TIOCM_RI;
538
539 return mctrl_gpio_get(atmel_port->gpios, &ret);
540 }
541
542 /*
543 * Stop transmitting.
544 */
545 static void atmel_stop_tx(struct uart_port *port)
546 {
547 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
548
549 if (atmel_use_pdc_tx(port)) {
550 /* disable PDC transmit */
551 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
552 }
553
554 /*
555 * Disable the transmitter.
556 * This is mandatory when DMA is used, otherwise the remaining data
557 * would still be fully transmitted by the DMA engine.
558 */
559 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);
560 atmel_port->tx_stopped = true;
561
562 /* Disable interrupts */
563 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
564
565 if (atmel_uart_is_half_duplex(port))
566 if (!atomic_read(&atmel_port->tasklet_shutdown))
567 atmel_start_rx(port);
568
569 }
570
571 /*
572 * Start transmitting.
573 */
574 static void atmel_start_tx(struct uart_port *port)
575 {
576 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
577
578 if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR)
579 & ATMEL_PDC_TXTEN))
580 /* The transmitter is already running. Yes, we
581 really need this. */
582 return;
583
584 if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
585 if (atmel_uart_is_half_duplex(port))
586 atmel_stop_rx(port);
587
588 if (atmel_use_pdc_tx(port))
589 /* re-enable PDC transmit */
590 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
591
592 /* Enable interrupts */
593 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
594
595 /* re-enable the transmitter */
596 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
597 atmel_port->tx_stopped = false;
598 }
599
600 /*
601 * start receiving - port is in process of being opened.
602 */
603 static void atmel_start_rx(struct uart_port *port)
604 {
605 /* reset status and receiver */
606 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
607
608 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN);
609
610 if (atmel_use_pdc_rx(port)) {
611 /* enable PDC controller */
612 atmel_uart_writel(port, ATMEL_US_IER,
613 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
614 port->read_status_mask);
615 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
616 } else {
617 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
618 }
619 }
620
621 /*
622 * Stop receiving - port is in process of being closed.
623 */
624 static void atmel_stop_rx(struct uart_port *port)
625 {
626 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS);
627
628 if (atmel_use_pdc_rx(port)) {
629 /* disable PDC receive */
630 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS);
631 atmel_uart_writel(port, ATMEL_US_IDR,
632 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
633 port->read_status_mask);
634 } else {
635 atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY);
636 }
637 }
638
639 /*
640 * Enable modem status interrupts
641 */
642 static void atmel_enable_ms(struct uart_port *port)
643 {
644 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
645 uint32_t ier = 0;
646
647 /*
648 * Interrupt should not be enabled twice
649 */
650 if (atmel_port->ms_irq_enabled)
651 return;
652
653 atmel_port->ms_irq_enabled = true;
654
655 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
656 ier |= ATMEL_US_CTSIC;
657
658 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
659 ier |= ATMEL_US_DSRIC;
660
661 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
662 ier |= ATMEL_US_RIIC;
663
664 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
665 ier |= ATMEL_US_DCDIC;
666
667 atmel_uart_writel(port, ATMEL_US_IER, ier);
668
669 mctrl_gpio_enable_ms(atmel_port->gpios);
670 }
671
672 /*
673 * Disable modem status interrupts
674 */
675 static void atmel_disable_ms(struct uart_port *port)
676 {
677 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
678 uint32_t idr = 0;
679
680 /*
681 * Interrupt should not be disabled twice
682 */
683 if (!atmel_port->ms_irq_enabled)
684 return;
685
686 atmel_port->ms_irq_enabled = false;
687
688 mctrl_gpio_disable_ms(atmel_port->gpios);
689
690 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
691 idr |= ATMEL_US_CTSIC;
692
693 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
694 idr |= ATMEL_US_DSRIC;
695
696 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
697 idr |= ATMEL_US_RIIC;
698
699 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
700 idr |= ATMEL_US_DCDIC;
701
702 atmel_uart_writel(port, ATMEL_US_IDR, idr);
703 }
704
705 /*
706 * Control the transmission of a break signal
707 */
708 static void atmel_break_ctl(struct uart_port *port, int break_state)
709 {
710 if (break_state != 0)
711 /* start break */
712 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK);
713 else
714 /* stop break */
715 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK);
716 }
717
718 /*
719 * Stores the incoming character in the ring buffer
720 */
721 static void
722 atmel_buffer_rx_char(struct uart_port *port, unsigned int status,
723 unsigned int ch)
724 {
725 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
726 struct circ_buf *ring = &atmel_port->rx_ring;
727 struct atmel_uart_char *c;
728
729 if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
730 /* Buffer overflow, ignore char */
731 return;
732
733 c = &((struct atmel_uart_char *)ring->buf)[ring->head];
734 c->status = status;
735 c->ch = ch;
736
737 /* Make sure the character is stored before we update head (pairs with the smp_rmb() in atmel_rx_from_ring()). */
738 smp_wmb();
739
740 ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
741 }
742
743 /*
744 * Deal with parity, framing and overrun errors.
745 */
746 static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
747 {
748 /* clear error */
749 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
750
751 if (status & ATMEL_US_RXBRK) {
752 /* ignore side-effect */
753 status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
754 port->icount.brk++;
755 }
756 if (status & ATMEL_US_PARE)
757 port->icount.parity++;
758 if (status & ATMEL_US_FRAME)
759 port->icount.frame++;
760 if (status & ATMEL_US_OVRE)
761 port->icount.overrun++;
762 }
763
764 /*
765 * Characters received (called from interrupt handler)
766 */
767 static void atmel_rx_chars(struct uart_port *port)
768 {
769 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
770 unsigned int status, ch;
771
772 status = atmel_uart_readl(port, ATMEL_US_CSR);
773 while (status & ATMEL_US_RXRDY) {
774 ch = atmel_uart_read_char(port);
775
776 /*
777 * note that the error handling code is
778 * out of the main execution path
779 */
780 if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
781 | ATMEL_US_OVRE | ATMEL_US_RXBRK)
782 || atmel_port->break_active)) {
783
784 /* clear error */
785 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
786
787 if (status & ATMEL_US_RXBRK
788 && !atmel_port->break_active) {
789 atmel_port->break_active = 1;
790 atmel_uart_writel(port, ATMEL_US_IER,
791 ATMEL_US_RXBRK);
792 } else {
793 /*
794 * This is either the end-of-break
795 * condition or we've received at
796 * least one character without RXBRK
797 * being set. In both cases, the next
798 * RXBRK will indicate start-of-break.
799 */
800 atmel_uart_writel(port, ATMEL_US_IDR,
801 ATMEL_US_RXBRK);
802 status &= ~ATMEL_US_RXBRK;
803 atmel_port->break_active = 0;
804 }
805 }
806
807 atmel_buffer_rx_char(port, status, ch);
808 status = atmel_uart_readl(port, ATMEL_US_CSR);
809 }
810
811 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
812 }
813
814 /*
815 * Transmit characters (called from tasklet with TXRDY interrupt
816 * disabled)
817 */
818 static void atmel_tx_chars(struct uart_port *port)
819 {
820 struct circ_buf *xmit = &port->state->xmit;
821 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
822
823 if (port->x_char &&
824 (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) {
825 atmel_uart_write_char(port, port->x_char);
826 port->icount.tx++;
827 port->x_char = 0;
828 }
829 if (uart_circ_empty(xmit) || uart_tx_stopped(port))
830 return;
831
832 while (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY) {
833 atmel_uart_write_char(port, xmit->buf[xmit->tail]);
834 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
835 port->icount.tx++;
836 if (uart_circ_empty(xmit))
837 break;
838 }
839
840 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
841 uart_write_wakeup(port);
842
843 if (!uart_circ_empty(xmit)) {
844 /* we still have characters to transmit, so we should continue
845 * transmitting them when TX is ready, regardless of
846 * mode or duplexity
847 */
848 atmel_port->tx_done_mask |= ATMEL_US_TXRDY;
849
850 /* Enable interrupts */
851 atmel_uart_writel(port, ATMEL_US_IER,
852 atmel_port->tx_done_mask);
853 } else {
854 if (atmel_uart_is_half_duplex(port))
855 atmel_port->tx_done_mask &= ~ATMEL_US_TXRDY;
856 }
857 }
858
859 static void atmel_complete_tx_dma(void *arg)
860 {
861 struct atmel_uart_port *atmel_port = arg;
862 struct uart_port *port = &atmel_port->uart;
863 struct circ_buf *xmit = &port->state->xmit;
864 struct dma_chan *chan = atmel_port->chan_tx;
865 unsigned long flags;
866
867 spin_lock_irqsave(&port->lock, flags);
868
869 if (chan)
870 dmaengine_terminate_all(chan);
871 xmit->tail += atmel_port->tx_len;
872 xmit->tail &= UART_XMIT_SIZE - 1;
873
874 port->icount.tx += atmel_port->tx_len;
875
876 spin_lock_irq(&atmel_port->lock_tx);
877 async_tx_ack(atmel_port->desc_tx);
878 atmel_port->cookie_tx = -EINVAL;
879 atmel_port->desc_tx = NULL;
880 spin_unlock_irq(&atmel_port->lock_tx);
881
882 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
883 uart_write_wakeup(port);
884
885 /*
886 * xmit is a circular buffer so, if we have just sent data from
887 * xmit->tail to the end of xmit->buf, now we have to transmit the
888 * remaining data from the beginning of xmit->buf to xmit->head.
889 */
890 if (!uart_circ_empty(xmit))
891 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
892 else if (atmel_uart_is_half_duplex(port)) {
893 /*
894 * DMA done, re-enable TXEMPTY and signal that we can stop
895 * TX and start RX for RS485
896 */
897 atmel_port->hd_start_rx = true;
898 atmel_uart_writel(port, ATMEL_US_IER,
899 atmel_port->tx_done_mask);
900 }
901
902 spin_unlock_irqrestore(&port->lock, flags);
903 }
904
905 static void atmel_release_tx_dma(struct uart_port *port)
906 {
907 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
908 struct dma_chan *chan = atmel_port->chan_tx;
909
910 if (chan) {
911 dmaengine_terminate_all(chan);
912 dma_release_channel(chan);
913 dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1,
914 DMA_TO_DEVICE);
915 }
916
917 atmel_port->desc_tx = NULL;
918 atmel_port->chan_tx = NULL;
919 atmel_port->cookie_tx = -EINVAL;
920 }
921
922 /*
923 * Called from tasklet with TXRDY interrupt disabled.
924 */
925 static void atmel_tx_dma(struct uart_port *port)
926 {
927 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
928 struct circ_buf *xmit = &port->state->xmit;
929 struct dma_chan *chan = atmel_port->chan_tx;
930 struct dma_async_tx_descriptor *desc;
931 struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx;
932 unsigned int tx_len, part1_len, part2_len, sg_len;
933 dma_addr_t phys_addr;
934
935 /* Make sure we have an idle channel */
936 if (atmel_port->desc_tx != NULL)
937 return;
938
939 if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
940 /*
941 * DMA is idle now.
942 * Port xmit buffer is already mapped,
943 * and it is one page... Just adjust
944 * offsets and lengths. Since it is a circular buffer,
945 * we have to transmit till the end, and then the rest.
946 * Take the port lock to get a
947 * consistent xmit buffer state.
948 */
949 tx_len = CIRC_CNT_TO_END(xmit->head,
950 xmit->tail,
951 UART_XMIT_SIZE);
952
953 if (atmel_port->fifo_size) {
954 /* multi data mode */
955 part1_len = (tx_len & ~0x3); /* DWORD access */
956 part2_len = (tx_len & 0x3); /* BYTE access */
957 } else {
958 /* single data (legacy) mode */
959 part1_len = 0;
960 part2_len = tx_len; /* BYTE access only */
961 }
962
963 sg_init_table(sgl, 2);
964 sg_len = 0;
965 phys_addr = sg_dma_address(sg_tx) + xmit->tail;
966 if (part1_len) {
967 sg = &sgl[sg_len++];
968 sg_dma_address(sg) = phys_addr;
969 sg_dma_len(sg) = part1_len;
970
971 phys_addr += part1_len;
972 }
973
974 if (part2_len) {
975 sg = &sgl[sg_len++];
976 sg_dma_address(sg) = phys_addr;
977 sg_dma_len(sg) = part2_len;
978 }
979
980 /*
981 * save tx_len so atmel_complete_tx_dma() will increase
982 * xmit->tail correctly
983 */
984 atmel_port->tx_len = tx_len;
985
986 desc = dmaengine_prep_slave_sg(chan,
987 sgl,
988 sg_len,
989 DMA_MEM_TO_DEV,
990 DMA_PREP_INTERRUPT |
991 DMA_CTRL_ACK);
992 if (!desc) {
993 dev_err(port->dev, "Failed to send via dma!\n");
994 return;
995 }
996
997 dma_sync_sg_for_device(port->dev, sg_tx, 1, DMA_TO_DEVICE);
998
999 atmel_port->desc_tx = desc;
1000 desc->callback = atmel_complete_tx_dma;
1001 desc->callback_param = atmel_port;
1002 atmel_port->cookie_tx = dmaengine_submit(desc);
1003 if (dma_submit_error(atmel_port->cookie_tx)) {
1004 dev_err(port->dev, "dma_submit_error %d\n",
1005 atmel_port->cookie_tx);
1006 return;
1007 }
1008
1009 dma_async_issue_pending(chan);
1010 }
1011
1012 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1013 uart_write_wakeup(port);
1014 }
1015
1016 static int atmel_prepare_tx_dma(struct uart_port *port)
1017 {
1018 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1019 struct device *mfd_dev = port->dev->parent;
1020 dma_cap_mask_t mask;
1021 struct dma_slave_config config;
1022 int ret, nent;
1023
1024 dma_cap_zero(mask);
1025 dma_cap_set(DMA_SLAVE, mask);
1026
1027 atmel_port->chan_tx = dma_request_slave_channel(mfd_dev, "tx");
1028 if (atmel_port->chan_tx == NULL)
1029 goto chan_err;
1030 dev_info(port->dev, "using %s for tx DMA transfers\n",
1031 dma_chan_name(atmel_port->chan_tx));
1032
1033 spin_lock_init(&atmel_port->lock_tx);
1034 sg_init_table(&atmel_port->sg_tx, 1);
1035 /* UART circular tx buffer is an aligned page. */
1036 BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf));
1037 sg_set_page(&atmel_port->sg_tx,
1038 virt_to_page(port->state->xmit.buf),
1039 UART_XMIT_SIZE,
1040 offset_in_page(port->state->xmit.buf));
1041 nent = dma_map_sg(port->dev,
1042 &atmel_port->sg_tx,
1043 1,
1044 DMA_TO_DEVICE);
1045
1046 if (!nent) {
1047 dev_dbg(port->dev, "need to release resource of dma\n");
1048 goto chan_err;
1049 } else {
1050 dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
1051 sg_dma_len(&atmel_port->sg_tx),
1052 port->state->xmit.buf,
1053 &sg_dma_address(&atmel_port->sg_tx));
1054 }
1055
1056 /* Configure the slave DMA */
1057 memset(&config, 0, sizeof(config));
1058 config.direction = DMA_MEM_TO_DEV;
1059 config.dst_addr_width = (atmel_port->fifo_size) ?
1060 DMA_SLAVE_BUSWIDTH_4_BYTES :
1061 DMA_SLAVE_BUSWIDTH_1_BYTE;
1062 config.dst_addr = port->mapbase + ATMEL_US_THR;
1063 config.dst_maxburst = 1;
1064
1065 ret = dmaengine_slave_config(atmel_port->chan_tx,
1066 &config);
1067 if (ret) {
1068 dev_err(port->dev, "DMA tx slave configuration failed\n");
1069 goto chan_err;
1070 }
1071
1072 return 0;
1073
1074 chan_err:
1075 dev_err(port->dev, "TX channel not available, switch to pio\n");
1076 atmel_port->use_dma_tx = false;
1077 if (atmel_port->chan_tx)
1078 atmel_release_tx_dma(port);
1079 return -EINVAL;
1080 }
1081
1082 static void atmel_complete_rx_dma(void *arg)
1083 {
1084 struct uart_port *port = arg;
1085 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1086
1087 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
1088 }
1089
1090 static void atmel_release_rx_dma(struct uart_port *port)
1091 {
1092 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1093 struct dma_chan *chan = atmel_port->chan_rx;
1094
1095 if (chan) {
1096 dmaengine_terminate_all(chan);
1097 dma_release_channel(chan);
1098 dma_unmap_sg(port->dev, &atmel_port->sg_rx, 1,
1099 DMA_FROM_DEVICE);
1100 }
1101
1102 atmel_port->desc_rx = NULL;
1103 atmel_port->chan_rx = NULL;
1104 atmel_port->cookie_rx = -EINVAL;
1105 }
1106
1107 static void atmel_rx_from_dma(struct uart_port *port)
1108 {
1109 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1110 struct tty_port *tport = &port->state->port;
1111 struct circ_buf *ring = &atmel_port->rx_ring;
1112 struct dma_chan *chan = atmel_port->chan_rx;
1113 struct dma_tx_state state;
1114 enum dma_status dmastat;
1115 size_t count;
1116
1117
1118 /* Reset the UART timeout early so that we don't miss one */
1119 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1120 dmastat = dmaengine_tx_status(chan,
1121 atmel_port->cookie_rx,
1122 &state);
1123 /* Restart a new tasklet if DMA status is error */
1124 if (dmastat == DMA_ERROR) {
1125 dev_dbg(port->dev, "Get residue error, restart tasklet\n");
1126 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
1127 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
1128 return;
1129 }
1130
1131 /* CPU claims ownership of RX DMA buffer */
1132 dma_sync_sg_for_cpu(port->dev,
1133 &atmel_port->sg_rx,
1134 1,
1135 DMA_FROM_DEVICE);
1136
1137 /*
1138 * ring->head points to the end of data already written by the DMA.
1139 * ring->tail points to the beginning of data to be read by the
1140 * framework.
1141 * The current transfer size should not be larger than the dma buffer
1142 * length.
1143 */
1144 ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue;
1145 BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx));
1146 /*
1147 * At this point ring->head may point to the first byte right after the
1148 * last byte of the dma buffer:
1149 * 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx)
1150 *
1151 * However ring->tail must always point inside the dma buffer:
1152 * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1
1153 *
1154 * Since we use a ring buffer, we have to handle the case
1155 * where head is lower than tail. In such a case, we first read from
1156 * tail to the end of the buffer then reset tail.
1157 */
1158 if (ring->head < ring->tail) {
1159 count = sg_dma_len(&atmel_port->sg_rx) - ring->tail;
1160
1161 tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1162 ring->tail = 0;
1163 port->icount.rx += count;
1164 }
1165
1166 /* Finally we read data from tail to head */
1167 if (ring->tail < ring->head) {
1168 count = ring->head - ring->tail;
1169
1170 tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1171 /* Wrap ring->head if needed */
1172 if (ring->head >= sg_dma_len(&atmel_port->sg_rx))
1173 ring->head = 0;
1174 ring->tail = ring->head;
1175 port->icount.rx += count;
1176 }
1177
1178 /* USART retrieves ownership of RX DMA buffer */
1179 dma_sync_sg_for_device(port->dev,
1180 &atmel_port->sg_rx,
1181 1,
1182 DMA_FROM_DEVICE);
1183
1184 /*
1185 * Drop the lock here since it might end up calling
1186 * uart_start(), which takes the lock.
1187 */
1188 spin_unlock(&port->lock);
1189 tty_flip_buffer_push(tport);
1190 spin_lock(&port->lock);
1191
1192 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
1193 }
1194
1195 static int atmel_prepare_rx_dma(struct uart_port *port)
1196 {
1197 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1198 struct device *mfd_dev = port->dev->parent;
1199 struct dma_async_tx_descriptor *desc;
1200 dma_cap_mask_t mask;
1201 struct dma_slave_config config;
1202 struct circ_buf *ring;
1203 int ret, nent;
1204
1205 ring = &atmel_port->rx_ring;
1206
1207 dma_cap_zero(mask);
1208 dma_cap_set(DMA_CYCLIC, mask);
1209
1210 atmel_port->chan_rx = dma_request_slave_channel(mfd_dev, "rx");
1211 if (atmel_port->chan_rx == NULL)
1212 goto chan_err;
1213 dev_info(port->dev, "using %s for rx DMA transfers\n",
1214 dma_chan_name(atmel_port->chan_rx));
1215
1216 spin_lock_init(&atmel_port->lock_rx);
1217 sg_init_table(&atmel_port->sg_rx, 1);
1218 /* UART circular rx buffer is an aligned page. */
1219 BUG_ON(!PAGE_ALIGNED(ring->buf));
1220 sg_set_page(&atmel_port->sg_rx,
1221 virt_to_page(ring->buf),
1222 sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE,
1223 offset_in_page(ring->buf));
1224 nent = dma_map_sg(port->dev,
1225 &atmel_port->sg_rx,
1226 1,
1227 DMA_FROM_DEVICE);
1228
1229 if (!nent) {
1230 dev_dbg(port->dev, "need to release resource of dma\n");
1231 goto chan_err;
1232 } else {
1233 dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
1234 sg_dma_len(&atmel_port->sg_rx),
1235 ring->buf,
1236 &sg_dma_address(&atmel_port->sg_rx));
1237 }
1238
1239 /* Configure the slave DMA */
1240 memset(&config, 0, sizeof(config));
1241 config.direction = DMA_DEV_TO_MEM;
1242 config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1243 config.src_addr = port->mapbase + ATMEL_US_RHR;
1244 config.src_maxburst = 1;
1245
1246 ret = dmaengine_slave_config(atmel_port->chan_rx,
1247 &config);
1248 if (ret) {
1249 dev_err(port->dev, "DMA rx slave configuration failed\n");
1250 goto chan_err;
1251 }
1252 /*
1253 * Prepare a cyclic dma transfer, assign 2 descriptors,
1254 * each one is half ring buffer size
1255 */
1256 desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx,
1257 sg_dma_address(&atmel_port->sg_rx),
1258 sg_dma_len(&atmel_port->sg_rx),
1259 sg_dma_len(&atmel_port->sg_rx)/2,
1260 DMA_DEV_TO_MEM,
1261 DMA_PREP_INTERRUPT);
1262 if (!desc) {
1263 dev_err(port->dev, "Preparing DMA cyclic failed\n");
1264 goto chan_err;
1265 }
1266 desc->callback = atmel_complete_rx_dma;
1267 desc->callback_param = port;
1268 atmel_port->desc_rx = desc;
1269 atmel_port->cookie_rx = dmaengine_submit(desc);
1270 if (dma_submit_error(atmel_port->cookie_rx)) {
1271 dev_err(port->dev, "dma_submit_error %d\n",
1272 atmel_port->cookie_rx);
1273 goto chan_err;
1274 }
1275
1276 dma_async_issue_pending(atmel_port->chan_rx);
1277
1278 return 0;
1279
1280 chan_err:
1281 dev_err(port->dev, "RX channel not available, switch to pio\n");
1282 atmel_port->use_dma_rx = false;
1283 if (atmel_port->chan_rx)
1284 atmel_release_rx_dma(port);
1285 return -EINVAL;
1286 }
1287
1288 static void atmel_uart_timer_callback(struct timer_list *t)
1289 {
1290 struct atmel_uart_port *atmel_port = from_timer(atmel_port, t,
1291 uart_timer);
1292 struct uart_port *port = &atmel_port->uart;
1293
1294 if (!atomic_read(&atmel_port->tasklet_shutdown)) {
1295 tasklet_schedule(&atmel_port->tasklet_rx);
1296 mod_timer(&atmel_port->uart_timer,
1297 jiffies + uart_poll_timeout(port));
1298 }
1299 }
1300
1301 /*
1302 * receive interrupt handler.
1303 */
1304 static void
1305 atmel_handle_receive(struct uart_port *port, unsigned int pending)
1306 {
1307 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1308
1309 if (atmel_use_pdc_rx(port)) {
1310 /*
1311 * PDC receive. Just schedule the tasklet and let it
1312 * figure out the details.
1313 *
1314 * TODO: We're not handling error flags correctly at
1315 * the moment.
1316 */
1317 if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
1318 atmel_uart_writel(port, ATMEL_US_IDR,
1319 (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
1320 atmel_tasklet_schedule(atmel_port,
1321 &atmel_port->tasklet_rx);
1322 }
1323
1324 if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
1325 ATMEL_US_FRAME | ATMEL_US_PARE))
1326 atmel_pdc_rxerr(port, pending);
1327 }
1328
1329 if (atmel_use_dma_rx(port)) {
1330 if (pending & ATMEL_US_TIMEOUT) {
1331 atmel_uart_writel(port, ATMEL_US_IDR,
1332 ATMEL_US_TIMEOUT);
1333 atmel_tasklet_schedule(atmel_port,
1334 &atmel_port->tasklet_rx);
1335 }
1336 }
1337
1338 /* Interrupt receive */
1339 if (pending & ATMEL_US_RXRDY)
1340 atmel_rx_chars(port);
1341 else if (pending & ATMEL_US_RXBRK) {
1342 /*
1343 * End of break detected. If it came along with a
1344 * character, atmel_rx_chars will handle it.
1345 */
1346 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
1347 atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK);
1348 atmel_port->break_active = 0;
1349 }
1350 }
1351
1352 /*
1353 * transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
1354 */
1355 static void
1356 atmel_handle_transmit(struct uart_port *port, unsigned int pending)
1357 {
1358 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1359
1360 if (pending & atmel_port->tx_done_mask) {
1361 atmel_uart_writel(port, ATMEL_US_IDR,
1362 atmel_port->tx_done_mask);
1363
1364 /* Start RX if flag was set and FIFO is empty */
1365 if (atmel_port->hd_start_rx) {
1366 if (!(atmel_uart_readl(port, ATMEL_US_CSR)
1367 & ATMEL_US_TXEMPTY))
1368 dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n");
1369
1370 atmel_port->hd_start_rx = false;
1371 atmel_start_rx(port);
1372 }
1373
1374 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
1375 }
1376 }
1377
1378 /*
1379 * status flags interrupt handler.
1380 */
1381 static void
1382 atmel_handle_status(struct uart_port *port, unsigned int pending,
1383 unsigned int status)
1384 {
1385 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1386 unsigned int status_change;
1387
1388 if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
1389 | ATMEL_US_CTSIC)) {
1390 status_change = status ^ atmel_port->irq_status_prev;
1391 atmel_port->irq_status_prev = status;
1392
1393 if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
1394 | ATMEL_US_DCD | ATMEL_US_CTS)) {
1395 /* TODO: All reads to CSR will clear these interrupts! */
1396 if (status_change & ATMEL_US_RI)
1397 port->icount.rng++;
1398 if (status_change & ATMEL_US_DSR)
1399 port->icount.dsr++;
1400 if (status_change & ATMEL_US_DCD)
1401 uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
1402 if (status_change & ATMEL_US_CTS)
1403 uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
1404
1405 wake_up_interruptible(&port->state->port.delta_msr_wait);
1406 }
1407 }
1408
1409 if (pending & (ATMEL_US_NACK | ATMEL_US_ITERATION))
1410 dev_dbg(port->dev, "ISO7816 ERROR (0x%08x)\n", pending);
1411 }
1412
1413 /*
1414 * Interrupt handler
1415 */
1416 static irqreturn_t atmel_interrupt(int irq, void *dev_id)
1417 {
1418 struct uart_port *port = dev_id;
1419 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1420 unsigned int status, pending, mask, pass_counter = 0;
1421
1422 spin_lock(&atmel_port->lock_suspended);
1423
1424 do {
1425 status = atmel_uart_readl(port, ATMEL_US_CSR);
1426 mask = atmel_uart_readl(port, ATMEL_US_IMR);
1427 pending = status & mask;
1428 if (!pending)
1429 break;
1430
1431 if (atmel_port->suspended) {
1432 atmel_port->pending |= pending;
1433 atmel_port->pending_status = status;
1434 atmel_uart_writel(port, ATMEL_US_IDR, mask);
1435 pm_system_wakeup();
1436 break;
1437 }
1438
1439 atmel_handle_receive(port, pending);
1440 atmel_handle_status(port, pending, status);
1441 atmel_handle_transmit(port, pending);
1442 } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
1443
1444 spin_unlock(&atmel_port->lock_suspended);
1445
1446 return pass_counter ? IRQ_HANDLED : IRQ_NONE;
1447 }
1448
1449 static void atmel_release_tx_pdc(struct uart_port *port)
1450 {
1451 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1452 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1453
1454 dma_unmap_single(port->dev,
1455 pdc->dma_addr,
1456 pdc->dma_size,
1457 DMA_TO_DEVICE);
1458 }
1459
1460 /*
1461 * Called from tasklet with ENDTX and TXBUFE interrupts disabled.
1462 */
1463 static void atmel_tx_pdc(struct uart_port *port)
1464 {
1465 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1466 struct circ_buf *xmit = &port->state->xmit;
1467 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1468 int count;
1469
1470 /* nothing left to transmit? */
1471 if (atmel_uart_readl(port, ATMEL_PDC_TCR))
1472 return;
1473
1474 xmit->tail += pdc->ofs;
1475 xmit->tail &= UART_XMIT_SIZE - 1;
1476
1477 port->icount.tx += pdc->ofs;
1478 pdc->ofs = 0;
1479
1480 /* more to transmit - setup next transfer */
1481
1482 /* disable PDC transmit */
1483 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
1484
1485 if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
1486 dma_sync_single_for_device(port->dev,
1487 pdc->dma_addr,
1488 pdc->dma_size,
1489 DMA_TO_DEVICE);
1490
1491 count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
1492 pdc->ofs = count;
1493
1494 atmel_uart_writel(port, ATMEL_PDC_TPR,
1495 pdc->dma_addr + xmit->tail);
1496 atmel_uart_writel(port, ATMEL_PDC_TCR, count);
1497 /* re-enable PDC transmit */
1498 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
1499 /* Enable interrupts */
1500 atmel_uart_writel(port, ATMEL_US_IER,
1501 atmel_port->tx_done_mask);
1502 } else {
1503 if (atmel_uart_is_half_duplex(port)) {
1504 /* DMA done, stop TX, start RX for RS485 */
1505 atmel_start_rx(port);
1506 }
1507 }
1508
1509 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1510 uart_write_wakeup(port);
1511 }
1512
1513 static int atmel_prepare_tx_pdc(struct uart_port *port)
1514 {
1515 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1516 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1517 struct circ_buf *xmit = &port->state->xmit;
1518
1519 pdc->buf = xmit->buf;
1520 pdc->dma_addr = dma_map_single(port->dev,
1521 pdc->buf,
1522 UART_XMIT_SIZE,
1523 DMA_TO_DEVICE);
1524 pdc->dma_size = UART_XMIT_SIZE;
1525 pdc->ofs = 0;
1526
1527 return 0;
1528 }
1529
1530 static void atmel_rx_from_ring(struct uart_port *port)
1531 {
1532 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1533 struct circ_buf *ring = &atmel_port->rx_ring;
1534 unsigned int flg;
1535 unsigned int status;
1536
1537 while (ring->head != ring->tail) {
1538 struct atmel_uart_char c;
1539
1540 /* Make sure c is loaded after head. */
1541 smp_rmb();
1542
1543 c = ((struct atmel_uart_char *)ring->buf)[ring->tail];
1544
1545 ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
1546
1547 port->icount.rx++;
1548 status = c.status;
1549 flg = TTY_NORMAL;
1550
1551 /*
1552 * note that the error handling code is
1553 * out of the main execution path
1554 */
1555 if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
1556 | ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
1557 if (status & ATMEL_US_RXBRK) {
1558 /* ignore side-effect */
1559 status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
1560
1561 port->icount.brk++;
1562 if (uart_handle_break(port))
1563 continue;
1564 }
1565 if (status & ATMEL_US_PARE)
1566 port->icount.parity++;
1567 if (status & ATMEL_US_FRAME)
1568 port->icount.frame++;
1569 if (status & ATMEL_US_OVRE)
1570 port->icount.overrun++;
1571
1572 status &= port->read_status_mask;
1573
1574 if (status & ATMEL_US_RXBRK)
1575 flg = TTY_BREAK;
1576 else if (status & ATMEL_US_PARE)
1577 flg = TTY_PARITY;
1578 else if (status & ATMEL_US_FRAME)
1579 flg = TTY_FRAME;
1580 }
1581
1582
1583 if (uart_handle_sysrq_char(port, c.ch))
1584 continue;
1585
1586 uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
1587 }
1588
1589 /*
1590 * Drop the lock here since it might end up calling
1591 * uart_start(), which takes the lock.
1592 */
1593 spin_unlock(&port->lock);
1594 tty_flip_buffer_push(&port->state->port);
1595 spin_lock(&port->lock);
1596 }
1597
1598 static void atmel_release_rx_pdc(struct uart_port *port)
1599 {
1600 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1601 int i;
1602
1603 for (i = 0; i < 2; i++) {
1604 struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1605
1606 dma_unmap_single(port->dev,
1607 pdc->dma_addr,
1608 pdc->dma_size,
1609 DMA_FROM_DEVICE);
1610 kfree(pdc->buf);
1611 }
1612 }
1613
1614 static void atmel_rx_from_pdc(struct uart_port *port)
1615 {
1616 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1617 struct tty_port *tport = &port->state->port;
1618 struct atmel_dma_buffer *pdc;
1619 int rx_idx = atmel_port->pdc_rx_idx;
1620 unsigned int head;
1621 unsigned int tail;
1622 unsigned int count;
1623
1624 do {
1625 /* Reset the UART timeout early so that we don't miss one */
1626 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1627
1628 pdc = &atmel_port->pdc_rx[rx_idx];
1629 head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr;
1630 tail = pdc->ofs;
1631
1632 /* If the PDC has switched buffers, RPR won't contain
1633 * any address within the current buffer. Since head
1634 * is unsigned, we just need a one-way comparison to
1635 * find out.
1636 *
1637 * In this case, we just need to consume the entire
1638 * buffer and resubmit it for DMA. This will clear the
1639 * ENDRX bit as well, so that we can safely re-enable
1640 * all interrupts below.
1641 */
1642 head = min(head, pdc->dma_size);
1643
1644 if (likely(head != tail)) {
1645 dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
1646 pdc->dma_size, DMA_FROM_DEVICE);
1647
1648 /*
1649 * head will only wrap around when we recycle
1650 * the DMA buffer, and when that happens, we
1651 * explicitly set tail to 0. So head will
1652 * always be greater than tail.
1653 */
1654 count = head - tail;
1655
1656 tty_insert_flip_string(tport, pdc->buf + pdc->ofs,
1657 count);
1658
1659 dma_sync_single_for_device(port->dev, pdc->dma_addr,
1660 pdc->dma_size, DMA_FROM_DEVICE);
1661
1662 port->icount.rx += count;
1663 pdc->ofs = head;
1664 }
1665
1666 /*
1667 * If the current buffer is full, we need to check if
1668 * the next one contains any additional data.
1669 */
1670 if (head >= pdc->dma_size) {
1671 pdc->ofs = 0;
1672 atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr);
1673 atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size);
1674
1675 rx_idx = !rx_idx;
1676 atmel_port->pdc_rx_idx = rx_idx;
1677 }
1678 } while (head >= pdc->dma_size);
1679
1680 /*
1681 * Drop the lock here since it might end up calling
1682 * uart_start(), which takes the lock.
1683 */
1684 spin_unlock(&port->lock);
1685 tty_flip_buffer_push(tport);
1686 spin_lock(&port->lock);
1687
1688 atmel_uart_writel(port, ATMEL_US_IER,
1689 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1690 }
1691
1692 static int atmel_prepare_rx_pdc(struct uart_port *port)
1693 {
1694 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1695 int i;
1696
1697 for (i = 0; i < 2; i++) {
1698 struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1699
1700 pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
1701 if (pdc->buf == NULL) {
1702 if (i != 0) {
1703 dma_unmap_single(port->dev,
1704 atmel_port->pdc_rx[0].dma_addr,
1705 PDC_BUFFER_SIZE,
1706 DMA_FROM_DEVICE);
1707 kfree(atmel_port->pdc_rx[0].buf);
1708 }
1709 atmel_port->use_pdc_rx = false;
1710 return -ENOMEM;
1711 }
1712 pdc->dma_addr = dma_map_single(port->dev,
1713 pdc->buf,
1714 PDC_BUFFER_SIZE,
1715 DMA_FROM_DEVICE);
1716 pdc->dma_size = PDC_BUFFER_SIZE;
1717 pdc->ofs = 0;
1718 }
1719
1720 atmel_port->pdc_rx_idx = 0;
1721
1722 atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr);
1723 atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE);
1724
1725 atmel_uart_writel(port, ATMEL_PDC_RNPR,
1726 atmel_port->pdc_rx[1].dma_addr);
1727 atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE);
1728
1729 return 0;
1730 }
1731
1732 /*
1733 * tasklet handling tty stuff outside the interrupt handler.
1734 */
1735 static void atmel_tasklet_rx_func(struct tasklet_struct *t)
1736 {
1737 struct atmel_uart_port *atmel_port = from_tasklet(atmel_port, t,
1738 tasklet_rx);
1739 struct uart_port *port = &atmel_port->uart;
1740
1741 /* The interrupt handler does not take the lock */
1742 spin_lock(&port->lock);
1743 atmel_port->schedule_rx(port);
1744 spin_unlock(&port->lock);
1745 }
1746
1747 static void atmel_tasklet_tx_func(struct tasklet_struct *t)
1748 {
1749 struct atmel_uart_port *atmel_port = from_tasklet(atmel_port, t,
1750 tasklet_tx);
1751 struct uart_port *port = &atmel_port->uart;
1752
1753 /* The interrupt handler does not take the lock */
1754 spin_lock(&port->lock);
1755 atmel_port->schedule_tx(port);
1756 spin_unlock(&port->lock);
1757 }
1758
1759 static void atmel_init_property(struct atmel_uart_port *atmel_port,
1760 struct platform_device *pdev)
1761 {
1762 struct device_node *np = pdev->dev.of_node;
1763
1764 /* DMA/PDC usage specification */
1765 if (of_property_read_bool(np, "atmel,use-dma-rx")) {
1766 if (of_property_read_bool(np, "dmas")) {
1767 atmel_port->use_dma_rx = true;
1768 atmel_port->use_pdc_rx = false;
1769 } else {
1770 atmel_port->use_dma_rx = false;
1771 atmel_port->use_pdc_rx = true;
1772 }
1773 } else {
1774 atmel_port->use_dma_rx = false;
1775 atmel_port->use_pdc_rx = false;
1776 }
1777
1778 if (of_property_read_bool(np, "atmel,use-dma-tx")) {
1779 if (of_property_read_bool(np, "dmas")) {
1780 atmel_port->use_dma_tx = true;
1781 atmel_port->use_pdc_tx = false;
1782 } else {
1783 atmel_port->use_dma_tx = false;
1784 atmel_port->use_pdc_tx = true;
1785 }
1786 } else {
1787 atmel_port->use_dma_tx = false;
1788 atmel_port->use_pdc_tx = false;
1789 }
1790 }
1791
1792 static void atmel_set_ops(struct uart_port *port)
1793 {
1794 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1795
1796 if (atmel_use_dma_rx(port)) {
1797 atmel_port->prepare_rx = &atmel_prepare_rx_dma;
1798 atmel_port->schedule_rx = &atmel_rx_from_dma;
1799 atmel_port->release_rx = &atmel_release_rx_dma;
1800 } else if (atmel_use_pdc_rx(port)) {
1801 atmel_port->prepare_rx = &atmel_prepare_rx_pdc;
1802 atmel_port->schedule_rx = &atmel_rx_from_pdc;
1803 atmel_port->release_rx = &atmel_release_rx_pdc;
1804 } else {
1805 atmel_port->prepare_rx = NULL;
1806 atmel_port->schedule_rx = &atmel_rx_from_ring;
1807 atmel_port->release_rx = NULL;
1808 }
1809
1810 if (atmel_use_dma_tx(port)) {
1811 atmel_port->prepare_tx = &atmel_prepare_tx_dma;
1812 atmel_port->schedule_tx = &atmel_tx_dma;
1813 atmel_port->release_tx = &atmel_release_tx_dma;
1814 } else if (atmel_use_pdc_tx(port)) {
1815 atmel_port->prepare_tx = &atmel_prepare_tx_pdc;
1816 atmel_port->schedule_tx = &atmel_tx_pdc;
1817 atmel_port->release_tx = &atmel_release_tx_pdc;
1818 } else {
1819 atmel_port->prepare_tx = NULL;
1820 atmel_port->schedule_tx = &atmel_tx_chars;
1821 atmel_port->release_tx = NULL;
1822 }
1823 }
1824
1825 /*
1826 * Get the IP name: usart or uart
1827 */
1828 static void atmel_get_ip_name(struct uart_port *port)
1829 {
1830 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1831 int name = atmel_uart_readl(port, ATMEL_US_NAME);
1832 u32 version;
1833 u32 usart, dbgu_uart, new_uart;
1834 /* ASCII decoding for IP version */
1835 usart = 0x55534152; /* USAR(T) */
1836 dbgu_uart = 0x44424755; /* DBGU */
1837 new_uart = 0x55415254; /* UART */
1838
1839 /*
1840 * Only USART devices from at91sam9260 SOC implement fractional
1841 * baudrate. It is available for all asynchronous modes, with the
1842 * following restriction: the sampling clock's duty cycle is not
1843 * constant.
1844 */
1845 atmel_port->has_frac_baudrate = false;
1846 atmel_port->has_hw_timer = false;
1847
1848 if (name == new_uart) {
1849 dev_dbg(port->dev, "Uart with hw timer");
1850 atmel_port->has_hw_timer = true;
1851 atmel_port->rtor = ATMEL_UA_RTOR;
1852 } else if (name == usart) {
1853 dev_dbg(port->dev, "Usart\n");
1854 atmel_port->has_frac_baudrate = true;
1855 atmel_port->has_hw_timer = true;
1856 atmel_port->rtor = ATMEL_US_RTOR;
1857 version = atmel_uart_readl(port, ATMEL_US_VERSION);
1858 switch (version) {
1859 case 0x814: /* sama5d2 */
1860 fallthrough;
1861 case 0x701: /* sama5d4 */
1862 atmel_port->fidi_min = 3;
1863 atmel_port->fidi_max = 65535;
1864 break;
1865 case 0x502: /* sam9x5, sama5d3 */
1866 atmel_port->fidi_min = 3;
1867 atmel_port->fidi_max = 2047;
1868 break;
1869 default:
1870 atmel_port->fidi_min = 1;
1871 atmel_port->fidi_max = 2047;
1872 }
1873 } else if (name == dbgu_uart) {
1874 dev_dbg(port->dev, "Dbgu or uart without hw timer\n");
1875 } else {
1876 /* fallback for older SoCs: use version field */
1877 version = atmel_uart_readl(port, ATMEL_US_VERSION);
1878 switch (version) {
1879 case 0x302:
1880 case 0x10213:
1881 case 0x10302:
1882 dev_dbg(port->dev, "This version is usart\n");
1883 atmel_port->has_frac_baudrate = true;
1884 atmel_port->has_hw_timer = true;
1885 atmel_port->rtor = ATMEL_US_RTOR;
1886 break;
1887 case 0x203:
1888 case 0x10202:
1889 dev_dbg(port->dev, "This version is uart\n");
1890 break;
1891 default:
1892 dev_err(port->dev, "Not supported ip name nor version, set to uart\n");
1893 }
1894 }
1895 }
1896
1897 /*
1898 * Perform initialization and enable port for reception
1899 */
 1900 static int atmel_startup(struct uart_port *port)
1901 {
1902 struct platform_device *pdev = to_platform_device(port->dev);
1903 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1904 int retval;
1905
1906 /*
 1907 * Ensure that no interrupts are enabled, otherwise when
1908 * request_irq() is called we could get stuck trying to
1909 * handle an unexpected interrupt
1910 */
1911 atmel_uart_writel(port, ATMEL_US_IDR, -1);
1912 atmel_port->ms_irq_enabled = false;
1913
1914 /*
1915 * Allocate the IRQ
1916 */
1917 retval = request_irq(port->irq, atmel_interrupt,
1918 IRQF_SHARED | IRQF_COND_SUSPEND,
1919 dev_name(&pdev->dev), port);
1920 if (retval) {
1921 dev_err(port->dev, "atmel_startup - Can't get irq\n");
1922 return retval;
1923 }
1924
1925 atomic_set(&atmel_port->tasklet_shutdown, 0);
1926 tasklet_setup(&atmel_port->tasklet_rx, atmel_tasklet_rx_func);
1927 tasklet_setup(&atmel_port->tasklet_tx, atmel_tasklet_tx_func);
1928
1929 /*
1930 * Initialize DMA (if necessary)
1931 */
1932 atmel_init_property(atmel_port, pdev);
1933 atmel_set_ops(port);
1934
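/*
 * If preparing DMA or PDC fails below, the prepare_rx/prepare_tx helpers
 * (defined earlier in this file) are expected to clear the corresponding
 * use_dma/use_pdc flag, so calling atmel_set_ops() again falls back to
 * the PIO handlers.
 */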
1935 if (atmel_port->prepare_rx) {
1936 retval = atmel_port->prepare_rx(port);
1937 if (retval < 0)
1938 atmel_set_ops(port);
1939 }
1940
1941 if (atmel_port->prepare_tx) {
1942 retval = atmel_port->prepare_tx(port);
1943 if (retval < 0)
1944 atmel_set_ops(port);
1945 }
1946
1947 /*
1948 * Enable FIFO when available
1949 */
1950 if (atmel_port->fifo_size) {
1951 unsigned int txrdym = ATMEL_US_ONE_DATA;
1952 unsigned int rxrdym = ATMEL_US_ONE_DATA;
1953 unsigned int fmr;
1954
1955 atmel_uart_writel(port, ATMEL_US_CR,
1956 ATMEL_US_FIFOEN |
1957 ATMEL_US_RXFCLR |
1958 ATMEL_US_TXFLCLR);
1959
1960 if (atmel_use_dma_tx(port))
1961 txrdym = ATMEL_US_FOUR_DATA;
1962
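/*
 * When both RTS thresholds are known, FIFO-driven RTS control (FRTSC) is
 * enabled: RTS is deasserted once the RX FIFO fills up to rts_high data
 * and asserted again when it drains below rts_low, as also described in
 * atmel_set_termios() below.
 */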
1963 fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym);
1964 if (atmel_port->rts_high &&
1965 atmel_port->rts_low)
1966 fmr |= ATMEL_US_FRTSC |
1967 ATMEL_US_RXFTHRES(atmel_port->rts_high) |
1968 ATMEL_US_RXFTHRES2(atmel_port->rts_low);
1969
1970 atmel_uart_writel(port, ATMEL_US_FMR, fmr);
1971 }
1972
1973 /* Save current CSR for comparison in atmel_tasklet_func() */
1974 atmel_port->irq_status_prev = atmel_uart_readl(port, ATMEL_US_CSR);
1975
1976 /*
1977 * Finally, enable the serial port
1978 */
1979 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
1980 /* enable xmit & rcvr */
1981 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
1982 atmel_port->tx_stopped = false;
1983
1984 timer_setup(&atmel_port->uart_timer, atmel_uart_timer_callback, 0);
1985
1986 if (atmel_use_pdc_rx(port)) {
1987 /* set UART timeout */
1988 if (!atmel_port->has_hw_timer) {
1989 mod_timer(&atmel_port->uart_timer,
1990 jiffies + uart_poll_timeout(port));
1991 /* set USART timeout */
1992 } else {
1993 atmel_uart_writel(port, atmel_port->rtor,
1994 PDC_RX_TIMEOUT);
1995 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1996
1997 atmel_uart_writel(port, ATMEL_US_IER,
1998 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1999 }
2000 /* enable PDC controller */
2001 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
2002 } else if (atmel_use_dma_rx(port)) {
2003 /* set UART timeout */
2004 if (!atmel_port->has_hw_timer) {
2005 mod_timer(&atmel_port->uart_timer,
2006 jiffies + uart_poll_timeout(port));
2007 /* set USART timeout */
2008 } else {
2009 atmel_uart_writel(port, atmel_port->rtor,
2010 PDC_RX_TIMEOUT);
2011 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
2012
2013 atmel_uart_writel(port, ATMEL_US_IER,
2014 ATMEL_US_TIMEOUT);
2015 }
2016 } else {
2017 /* enable receive only */
2018 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
2019 }
2020
2021 return 0;
2022 }
2023
2024 /*
2025 * Flush any TX data submitted for DMA. Called when the TX circular
2026 * buffer is reset.
2027 */
 2028 static void atmel_flush_buffer(struct uart_port *port)
2029 {
2030 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2031
2032 if (atmel_use_pdc_tx(port)) {
2033 atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
2034 atmel_port->pdc_tx.ofs = 0;
2035 }
2036 /*
2037 * in uart_flush_buffer(), the xmit circular buffer has just
2038 * been cleared, so we have to reset tx_len accordingly.
2039 */
2040 atmel_port->tx_len = 0;
2041 }
2042
2043 /*
2044 * Disable the port
2045 */
 2046 static void atmel_shutdown(struct uart_port *port)
2047 {
2048 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2049
 2050 /* Disable modem control line interrupts */
2051 atmel_disable_ms(port);
2052
2053 /* Disable interrupts at device level */
2054 atmel_uart_writel(port, ATMEL_US_IDR, -1);
2055
2056 /* Prevent spurious interrupts from scheduling the tasklet */
2057 atomic_inc(&atmel_port->tasklet_shutdown);
2058
2059 /*
2060 * Prevent any tasklets being scheduled during
2061 * cleanup
2062 */
2063 del_timer_sync(&atmel_port->uart_timer);
2064
2065 /* Make sure that no interrupt is on the fly */
2066 synchronize_irq(port->irq);
2067
2068 /*
2069 * Clear out any scheduled tasklets before
2070 * we destroy the buffers
2071 */
2072 tasklet_kill(&atmel_port->tasklet_rx);
2073 tasklet_kill(&atmel_port->tasklet_tx);
2074
2075 /*
2076 * Ensure everything is stopped and
2077 * disable port and break condition.
2078 */
2079 atmel_stop_rx(port);
2080 atmel_stop_tx(port);
2081
2082 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
2083
2084 /*
2085 * Shut-down the DMA.
2086 */
2087 if (atmel_port->release_rx)
2088 atmel_port->release_rx(port);
2089 if (atmel_port->release_tx)
2090 atmel_port->release_tx(port);
2091
2092 /*
2093 * Reset ring buffer pointers
2094 */
2095 atmel_port->rx_ring.head = 0;
2096 atmel_port->rx_ring.tail = 0;
2097
2098 /*
2099 * Free the interrupts
2100 */
2101 free_irq(port->irq, port);
2102
2103 atmel_flush_buffer(port);
2104 }
2105
2106 /*
2107 * Power / Clock management.
2108 */
 2109 static void atmel_serial_pm(struct uart_port *port, unsigned int state,
2110 unsigned int oldstate)
2111 {
2112 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2113
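/*
 * Note: the state values handled below match serial_core's PM states
 * (0 == UART_PM_STATE_ON, 3 == UART_PM_STATE_OFF).
 */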
2114 switch (state) {
2115 case 0:
2116 /*
2117 * Enable the peripheral clock for this serial port.
2118 * This is called on uart_open() or a resume event.
2119 */
2120 clk_prepare_enable(atmel_port->clk);
2121
2122 /* re-enable interrupts if we disabled some on suspend */
2123 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr);
2124 break;
2125 case 3:
2126 /* Back up the interrupt mask and disable all interrupts */
2127 atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR);
2128 atmel_uart_writel(port, ATMEL_US_IDR, -1);
2129
2130 /*
2131 * Disable the peripheral clock for this serial port.
2132 * This is called on uart_close() or a suspend event.
2133 */
2134 clk_disable_unprepare(atmel_port->clk);
2135 break;
2136 default:
2137 dev_err(port->dev, "atmel_serial: unknown pm %d\n", state);
2138 }
2139 }
2140
2141 /*
2142 * Change the port parameters
2143 */
 2144 static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
2145 struct ktermios *old)
2146 {
2147 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2148 unsigned long flags;
2149 unsigned int old_mode, mode, imr, quot, baud, div, cd, fp = 0;
2150
2151 /* save the current mode register */
2152 mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR);
2153
2154 /* reset the mode, clock divisor, parity, stop bits and data size */
2155 mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP |
2156 ATMEL_US_PAR | ATMEL_US_USMODE);
2157
2158 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
2159
2160 /* byte size */
2161 switch (termios->c_cflag & CSIZE) {
2162 case CS5:
2163 mode |= ATMEL_US_CHRL_5;
2164 break;
2165 case CS6:
2166 mode |= ATMEL_US_CHRL_6;
2167 break;
2168 case CS7:
2169 mode |= ATMEL_US_CHRL_7;
2170 break;
2171 default:
2172 mode |= ATMEL_US_CHRL_8;
2173 break;
2174 }
2175
2176 /* stop bits */
2177 if (termios->c_cflag & CSTOPB)
2178 mode |= ATMEL_US_NBSTOP_2;
2179
2180 /* parity */
2181 if (termios->c_cflag & PARENB) {
2182 /* Mark or Space parity */
2183 if (termios->c_cflag & CMSPAR) {
2184 if (termios->c_cflag & PARODD)
2185 mode |= ATMEL_US_PAR_MARK;
2186 else
2187 mode |= ATMEL_US_PAR_SPACE;
2188 } else if (termios->c_cflag & PARODD)
2189 mode |= ATMEL_US_PAR_ODD;
2190 else
2191 mode |= ATMEL_US_PAR_EVEN;
2192 } else
2193 mode |= ATMEL_US_PAR_NONE;
2194
2195 spin_lock_irqsave(&port->lock, flags);
2196
2197 port->read_status_mask = ATMEL_US_OVRE;
2198 if (termios->c_iflag & INPCK)
2199 port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2200 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
2201 port->read_status_mask |= ATMEL_US_RXBRK;
2202
2203 if (atmel_use_pdc_rx(port))
2204 /* need to enable error interrupts */
2205 atmel_uart_writel(port, ATMEL_US_IER, port->read_status_mask);
2206
2207 /*
2208 * Characters to ignore
2209 */
2210 port->ignore_status_mask = 0;
2211 if (termios->c_iflag & IGNPAR)
2212 port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2213 if (termios->c_iflag & IGNBRK) {
2214 port->ignore_status_mask |= ATMEL_US_RXBRK;
2215 /*
2216 * If we're ignoring parity and break indicators,
2217 * ignore overruns too (for real raw support).
2218 */
2219 if (termios->c_iflag & IGNPAR)
2220 port->ignore_status_mask |= ATMEL_US_OVRE;
2221 }
 2222 /* TODO: Ignore all characters if CREAD is set. */
2223
2224 /* update the per-port timeout */
2225 uart_update_timeout(port, termios->c_cflag, baud);
2226
2227 /*
2228 * save/disable interrupts. The tty layer will ensure that the
2229 * transmitter is empty if requested by the caller, so there's
2230 * no need to wait for it here.
2231 */
2232 imr = atmel_uart_readl(port, ATMEL_US_IMR);
2233 atmel_uart_writel(port, ATMEL_US_IDR, -1);
2234
2235 /* disable receiver and transmitter */
2236 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
2237 atmel_port->tx_stopped = true;
2238
2239 /* mode */
2240 if (port->rs485.flags & SER_RS485_ENABLED) {
2241 atmel_uart_writel(port, ATMEL_US_TTGR,
2242 port->rs485.delay_rts_after_send);
2243 mode |= ATMEL_US_USMODE_RS485;
2244 } else if (port->iso7816.flags & SER_ISO7816_ENABLED) {
2245 atmel_uart_writel(port, ATMEL_US_TTGR, port->iso7816.tg);
2246 /* select mck clock, and output */
2247 mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO;
2248 /* set max iterations */
2249 mode |= ATMEL_US_MAX_ITER(3);
2250 if ((port->iso7816.flags & SER_ISO7816_T_PARAM)
2251 == SER_ISO7816_T(0))
2252 mode |= ATMEL_US_USMODE_ISO7816_T0;
2253 else
2254 mode |= ATMEL_US_USMODE_ISO7816_T1;
2255 } else if (termios->c_cflag & CRTSCTS) {
2256 /* RS232 with hardware handshake (RTS/CTS) */
2257 if (atmel_use_fifo(port) &&
2258 !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) {
2259 /*
2260 * with ATMEL_US_USMODE_HWHS set, the controller will
2261 * be able to drive the RTS pin high/low when the RX
2262 * FIFO is above RXFTHRES/below RXFTHRES2.
2263 * It will also disable the transmitter when the CTS
2264 * pin is high.
2265 * This mode is not activated if CTS pin is a GPIO
2266 * because in this case, the transmitter is always
2267 * disabled (there must be an internal pull-up
2268 * responsible for this behaviour).
2269 * If the RTS pin is a GPIO, the controller won't be
2270 * able to drive it according to the FIFO thresholds,
2271 * but it will be handled by the driver.
2272 */
2273 mode |= ATMEL_US_USMODE_HWHS;
2274 } else {
2275 /*
2276 * For platforms without FIFO, the flow control is
2277 * handled by the driver.
2278 */
2279 mode |= ATMEL_US_USMODE_NORMAL;
2280 }
2281 } else {
 2282 /* RS232 without hardware handshake */
2283 mode |= ATMEL_US_USMODE_NORMAL;
2284 }
2285
2286 /*
2287 * Set the baud rate:
 2288 * The fractional baud rate generator allows the output frequency to be
 2289 * set more accurately. This feature is enabled only when using normal mode.
2290 * baudrate = selected clock / (8 * (2 - OVER) * (CD + FP / 8))
2291 * Currently, OVER is always set to 0 so we get
2292 * baudrate = selected clock / (16 * (CD + FP / 8))
2293 * then
2294 * 8 CD + FP = selected clock / (2 * baudrate)
2295 */
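/*
 * Worked example (hypothetical numbers, not taken from a specific board):
 * with uartclk = 132 MHz and baud = 115200,
 * div = DIV_ROUND_CLOSEST(132000000, 230400) = 573, so CD = 71 and FP = 5,
 * giving roughly 115183 Bd (about 0.015% error).
 */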
2296 if (atmel_port->has_frac_baudrate) {
2297 div = DIV_ROUND_CLOSEST(port->uartclk, baud * 2);
2298 cd = div >> 3;
2299 fp = div & ATMEL_US_FP_MASK;
2300 } else {
2301 cd = uart_get_divisor(port, baud);
2302 }
2303
2304 if (cd > 65535) { /* BRGR is 16-bit, so switch to slower clock */
2305 cd /= 8;
2306 mode |= ATMEL_US_USCLKS_MCK_DIV8;
2307 }
2308 quot = cd | fp << ATMEL_US_FP_OFFSET;
2309
2310 if (!(port->iso7816.flags & SER_ISO7816_ENABLED))
2311 atmel_uart_writel(port, ATMEL_US_BRGR, quot);
2312
2313 /* set the mode, clock divisor, parity, stop bits and data size */
2314 atmel_uart_writel(port, ATMEL_US_MR, mode);
2315
2316 /*
2317 * when switching the mode, set the RTS line state according to the
2318 * new mode, otherwise keep the former state
2319 */
2320 if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
2321 unsigned int rts_state;
2322
2323 if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
2324 /* let the hardware control the RTS line */
2325 rts_state = ATMEL_US_RTSDIS;
2326 } else {
2327 /* force RTS line to low level */
2328 rts_state = ATMEL_US_RTSEN;
2329 }
2330
2331 atmel_uart_writel(port, ATMEL_US_CR, rts_state);
2332 }
2333
2334 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2335 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2336 atmel_port->tx_stopped = false;
2337
2338 /* restore interrupts */
2339 atmel_uart_writel(port, ATMEL_US_IER, imr);
2340
2341 /* CTS flow-control and modem-status interrupts */
2342 if (UART_ENABLE_MS(port, termios->c_cflag))
2343 atmel_enable_ms(port);
2344 else
2345 atmel_disable_ms(port);
2346
2347 spin_unlock_irqrestore(&port->lock, flags);
2348 }
2349
 2350 static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
2351 {
2352 if (termios->c_line == N_PPS) {
2353 port->flags |= UPF_HARDPPS_CD;
2354 spin_lock_irq(&port->lock);
2355 atmel_enable_ms(port);
2356 spin_unlock_irq(&port->lock);
2357 } else {
2358 port->flags &= ~UPF_HARDPPS_CD;
2359 if (!UART_ENABLE_MS(port, termios->c_cflag)) {
2360 spin_lock_irq(&port->lock);
2361 atmel_disable_ms(port);
2362 spin_unlock_irq(&port->lock);
2363 }
2364 }
2365 }
2366
2367 /*
2368 * Return string describing the specified port
2369 */
 2370 static const char *atmel_type(struct uart_port *port)
2371 {
2372 return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL;
2373 }
2374
2375 /*
2376 * Release the memory region(s) being used by 'port'.
2377 */
 2378 static void atmel_release_port(struct uart_port *port)
2379 {
2380 struct platform_device *mpdev = to_platform_device(port->dev->parent);
2381 int size = resource_size(mpdev->resource);
2382
2383 release_mem_region(port->mapbase, size);
2384
2385 if (port->flags & UPF_IOREMAP) {
2386 iounmap(port->membase);
2387 port->membase = NULL;
2388 }
2389 }
2390
2391 /*
2392 * Request the memory region(s) being used by 'port'.
2393 */
 2394 static int atmel_request_port(struct uart_port *port)
2395 {
2396 struct platform_device *mpdev = to_platform_device(port->dev->parent);
2397 int size = resource_size(mpdev->resource);
2398
2399 if (!request_mem_region(port->mapbase, size, "atmel_serial"))
2400 return -EBUSY;
2401
2402 if (port->flags & UPF_IOREMAP) {
2403 port->membase = ioremap(port->mapbase, size);
2404 if (port->membase == NULL) {
2405 release_mem_region(port->mapbase, size);
2406 return -ENOMEM;
2407 }
2408 }
2409
2410 return 0;
2411 }
2412
2413 /*
2414 * Configure/autoconfigure the port.
2415 */
 2416 static void atmel_config_port(struct uart_port *port, int flags)
2417 {
2418 if (flags & UART_CONFIG_TYPE) {
2419 port->type = PORT_ATMEL;
2420 atmel_request_port(port);
2421 }
2422 }
2423
2424 /*
2425 * Verify the new serial_struct (for TIOCSSERIAL).
2426 */
 2427 static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
2428 {
2429 int ret = 0;
2430 if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL)
2431 ret = -EINVAL;
2432 if (port->irq != ser->irq)
2433 ret = -EINVAL;
2434 if (ser->io_type != SERIAL_IO_MEM)
2435 ret = -EINVAL;
2436 if (port->uartclk / 16 != ser->baud_base)
2437 ret = -EINVAL;
2438 if (port->mapbase != (unsigned long)ser->iomem_base)
2439 ret = -EINVAL;
2440 if (port->iobase != ser->port)
2441 ret = -EINVAL;
2442 if (ser->hub6 != 0)
2443 ret = -EINVAL;
2444 return ret;
2445 }
2446
2447 #ifdef CONFIG_CONSOLE_POLL
 2448 static int atmel_poll_get_char(struct uart_port *port)
2449 {
2450 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY))
2451 cpu_relax();
2452
2453 return atmel_uart_read_char(port);
2454 }
2455
 2456 static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
2457 {
2458 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2459 cpu_relax();
2460
2461 atmel_uart_write_char(port, ch);
2462 }
2463 #endif
2464
2465 static const struct uart_ops atmel_pops = {
2466 .tx_empty = atmel_tx_empty,
2467 .set_mctrl = atmel_set_mctrl,
2468 .get_mctrl = atmel_get_mctrl,
2469 .stop_tx = atmel_stop_tx,
2470 .start_tx = atmel_start_tx,
2471 .stop_rx = atmel_stop_rx,
2472 .enable_ms = atmel_enable_ms,
2473 .break_ctl = atmel_break_ctl,
2474 .startup = atmel_startup,
2475 .shutdown = atmel_shutdown,
2476 .flush_buffer = atmel_flush_buffer,
2477 .set_termios = atmel_set_termios,
2478 .set_ldisc = atmel_set_ldisc,
2479 .type = atmel_type,
2480 .release_port = atmel_release_port,
2481 .request_port = atmel_request_port,
2482 .config_port = atmel_config_port,
2483 .verify_port = atmel_verify_port,
2484 .pm = atmel_serial_pm,
2485 #ifdef CONFIG_CONSOLE_POLL
2486 .poll_get_char = atmel_poll_get_char,
2487 .poll_put_char = atmel_poll_put_char,
2488 #endif
2489 };
2490
2491 /*
2492 * Configure the port from the platform device resource info.
2493 */
 2494 static int atmel_init_port(struct atmel_uart_port *atmel_port,
2495 struct platform_device *pdev)
2496 {
2497 int ret;
2498 struct uart_port *port = &atmel_port->uart;
2499 struct platform_device *mpdev = to_platform_device(pdev->dev.parent);
2500
2501 atmel_init_property(atmel_port, pdev);
2502 atmel_set_ops(port);
2503
2504 port->iotype = UPIO_MEM;
2505 port->flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
2506 port->ops = &atmel_pops;
2507 port->fifosize = 1;
2508 port->dev = &pdev->dev;
2509 port->mapbase = mpdev->resource[0].start;
2510 port->irq = mpdev->resource[1].start;
2511 port->rs485_config = atmel_config_rs485;
2512 port->iso7816_config = atmel_config_iso7816;
2513 port->membase = NULL;
2514
2515 memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
2516
2517 ret = uart_get_rs485_mode(port);
2518 if (ret)
2519 return ret;
2520
2521 /* for console, the clock could already be configured */
2522 if (!atmel_port->clk) {
2523 atmel_port->clk = clk_get(&mpdev->dev, "usart");
2524 if (IS_ERR(atmel_port->clk)) {
2525 ret = PTR_ERR(atmel_port->clk);
2526 atmel_port->clk = NULL;
2527 return ret;
2528 }
2529 ret = clk_prepare_enable(atmel_port->clk);
2530 if (ret) {
2531 clk_put(atmel_port->clk);
2532 atmel_port->clk = NULL;
2533 return ret;
2534 }
2535 port->uartclk = clk_get_rate(atmel_port->clk);
2536 clk_disable_unprepare(atmel_port->clk);
2537 /* only enable clock when USART is in use */
2538 }
2539
2540 /*
2541 * Use TXEMPTY for interrupt when rs485 or ISO7816 else TXRDY or
2542 * ENDTX|TXBUFE
2543 */
2544 if (atmel_uart_is_half_duplex(port))
2545 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
2546 else if (atmel_use_pdc_tx(port)) {
2547 port->fifosize = PDC_BUFFER_SIZE;
2548 atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
2549 } else {
2550 atmel_port->tx_done_mask = ATMEL_US_TXRDY;
2551 }
2552
2553 return 0;
2554 }
2555
2556 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
 2557 static void atmel_console_putchar(struct uart_port *port, int ch)
2558 {
2559 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2560 cpu_relax();
2561 atmel_uart_write_char(port, ch);
2562 }
2563
2564 /*
2565 * Interrupts are disabled on entering
2566 */
 2567 static void atmel_console_write(struct console *co, const char *s, u_int count)
2568 {
2569 struct uart_port *port = &atmel_ports[co->index].uart;
2570 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2571 unsigned int status, imr;
2572 unsigned int pdc_tx;
2573
2574 /*
2575 * First, save IMR and then disable interrupts
2576 */
2577 imr = atmel_uart_readl(port, ATMEL_US_IMR);
2578 atmel_uart_writel(port, ATMEL_US_IDR,
2579 ATMEL_US_RXRDY | atmel_port->tx_done_mask);
2580
2581 /* Store PDC transmit status and disable it */
2582 pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
2583 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
2584
2585 /* Make sure that tx path is actually able to send characters */
2586 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
2587 atmel_port->tx_stopped = false;
2588
2589 uart_console_write(port, s, count, atmel_console_putchar);
2590
2591 /*
 2592 * Finally, wait for the transmitter to become ready again
 2593 * and restore IMR
2594 */
2595 do {
2596 status = atmel_uart_readl(port, ATMEL_US_CSR);
2597 } while (!(status & ATMEL_US_TXRDY));
2598
2599 /* Restore PDC transmit status */
2600 if (pdc_tx)
2601 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
2602
2603 /* set interrupts back the way they were */
2604 atmel_uart_writel(port, ATMEL_US_IER, imr);
2605 }
2606
2607 /*
 2608 * If the port was already initialized (e.g. by a boot loader),
2609 * try to determine the current setup.
2610 */
 2611 static void __init atmel_console_get_options(struct uart_port *port, int *baud,
2612 int *parity, int *bits)
2613 {
2614 unsigned int mr, quot;
2615
2616 /*
2617 * If the baud rate generator isn't running, the port wasn't
2618 * initialized by the boot loader.
2619 */
2620 quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD;
2621 if (!quot)
2622 return;
2623
2624 mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL;
2625 if (mr == ATMEL_US_CHRL_8)
2626 *bits = 8;
2627 else
2628 *bits = 7;
2629
2630 mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR;
2631 if (mr == ATMEL_US_PAR_EVEN)
2632 *parity = 'e';
2633 else if (mr == ATMEL_US_PAR_ODD)
2634 *parity = 'o';
2635
2636 *baud = port->uartclk / (16 * quot);
2637 }
2638
 2639 static int __init atmel_console_setup(struct console *co, char *options)
2640 {
2641 int ret;
2642 struct uart_port *port = &atmel_ports[co->index].uart;
2643 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2644 int baud = 115200;
2645 int bits = 8;
2646 int parity = 'n';
2647 int flow = 'n';
2648
2649 if (port->membase == NULL) {
2650 /* Port not initialized yet - delay setup */
2651 return -ENODEV;
2652 }
2653
2654 ret = clk_prepare_enable(atmel_ports[co->index].clk);
2655 if (ret)
2656 return ret;
2657
2658 atmel_uart_writel(port, ATMEL_US_IDR, -1);
2659 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2660 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2661 atmel_port->tx_stopped = false;
2662
2663 if (options)
2664 uart_parse_options(options, &baud, &parity, &bits, &flow);
2665 else
2666 atmel_console_get_options(port, &baud, &parity, &bits);
2667
2668 return uart_set_options(port, co, baud, parity, bits, flow);
2669 }
2670
2671 static struct uart_driver atmel_uart;
2672
2673 static struct console atmel_console = {
2674 .name = ATMEL_DEVICENAME,
2675 .write = atmel_console_write,
2676 .device = uart_console_device,
2677 .setup = atmel_console_setup,
2678 .flags = CON_PRINTBUFFER,
2679 .index = -1,
2680 .data = &atmel_uart,
2681 };
2682
2683 #define ATMEL_CONSOLE_DEVICE (&atmel_console)
2684
2685 #else
2686 #define ATMEL_CONSOLE_DEVICE NULL
2687 #endif
2688
2689 static struct uart_driver atmel_uart = {
2690 .owner = THIS_MODULE,
2691 .driver_name = "atmel_serial",
2692 .dev_name = ATMEL_DEVICENAME,
2693 .major = SERIAL_ATMEL_MAJOR,
2694 .minor = MINOR_START,
2695 .nr = ATMEL_MAX_UART,
2696 .cons = ATMEL_CONSOLE_DEVICE,
2697 };
2698
2699 #ifdef CONFIG_PM
 2700 static bool atmel_serial_clk_will_stop(void)
2701 {
2702 #ifdef CONFIG_ARCH_AT91
2703 return at91_suspend_entering_slow_clock();
2704 #else
2705 return false;
2706 #endif
2707 }
2708
 2709 static int atmel_serial_suspend(struct platform_device *pdev,
2710 pm_message_t state)
2711 {
2712 struct uart_port *port = platform_get_drvdata(pdev);
2713 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2714
2715 if (uart_console(port) && console_suspend_enabled) {
2716 /* Drain the TX shifter */
2717 while (!(atmel_uart_readl(port, ATMEL_US_CSR) &
2718 ATMEL_US_TXEMPTY))
2719 cpu_relax();
2720 }
2721
2722 if (uart_console(port) && !console_suspend_enabled) {
2723 /* Cache register values as we won't get a full shutdown/startup
2724 * cycle
2725 */
2726 atmel_port->cache.mr = atmel_uart_readl(port, ATMEL_US_MR);
2727 atmel_port->cache.imr = atmel_uart_readl(port, ATMEL_US_IMR);
2728 atmel_port->cache.brgr = atmel_uart_readl(port, ATMEL_US_BRGR);
2729 atmel_port->cache.rtor = atmel_uart_readl(port,
2730 atmel_port->rtor);
2731 atmel_port->cache.ttgr = atmel_uart_readl(port, ATMEL_US_TTGR);
2732 atmel_port->cache.fmr = atmel_uart_readl(port, ATMEL_US_FMR);
2733 atmel_port->cache.fimr = atmel_uart_readl(port, ATMEL_US_FIMR);
2734 }
2735
 2736 /* we cannot wake up if we're running on the slow clock */
2737 atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
2738 if (atmel_serial_clk_will_stop()) {
2739 unsigned long flags;
2740
2741 spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2742 atmel_port->suspended = true;
2743 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2744 device_set_wakeup_enable(&pdev->dev, 0);
2745 }
2746
2747 uart_suspend_port(&atmel_uart, port);
2748
2749 return 0;
2750 }
2751
 2752 static int atmel_serial_resume(struct platform_device *pdev)
2753 {
2754 struct uart_port *port = platform_get_drvdata(pdev);
2755 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2756 unsigned long flags;
2757
2758 if (uart_console(port) && !console_suspend_enabled) {
2759 atmel_uart_writel(port, ATMEL_US_MR, atmel_port->cache.mr);
2760 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->cache.imr);
2761 atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->cache.brgr);
2762 atmel_uart_writel(port, atmel_port->rtor,
2763 atmel_port->cache.rtor);
2764 atmel_uart_writel(port, ATMEL_US_TTGR, atmel_port->cache.ttgr);
2765
2766 if (atmel_port->fifo_size) {
2767 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_FIFOEN |
2768 ATMEL_US_RXFCLR | ATMEL_US_TXFLCLR);
2769 atmel_uart_writel(port, ATMEL_US_FMR,
2770 atmel_port->cache.fmr);
2771 atmel_uart_writel(port, ATMEL_US_FIER,
2772 atmel_port->cache.fimr);
2773 }
2774 atmel_start_rx(port);
2775 }
2776
2777 spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2778 if (atmel_port->pending) {
2779 atmel_handle_receive(port, atmel_port->pending);
2780 atmel_handle_status(port, atmel_port->pending,
2781 atmel_port->pending_status);
2782 atmel_handle_transmit(port, atmel_port->pending);
2783 atmel_port->pending = 0;
2784 }
2785 atmel_port->suspended = false;
2786 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2787
2788 uart_resume_port(&atmel_uart, port);
2789 device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
2790
2791 return 0;
2792 }
2793 #else
2794 #define atmel_serial_suspend NULL
2795 #define atmel_serial_resume NULL
2796 #endif
2797
 2798 static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port,
2799 struct platform_device *pdev)
2800 {
2801 atmel_port->fifo_size = 0;
2802 atmel_port->rts_low = 0;
2803 atmel_port->rts_high = 0;
2804
2805 if (of_property_read_u32(pdev->dev.of_node,
2806 "atmel,fifo-size",
2807 &atmel_port->fifo_size))
2808 return;
2809
2810 if (!atmel_port->fifo_size)
2811 return;
2812
2813 if (atmel_port->fifo_size < ATMEL_MIN_FIFO_SIZE) {
2814 atmel_port->fifo_size = 0;
2815 dev_err(&pdev->dev, "Invalid FIFO size\n");
2816 return;
2817 }
2818
2819 /*
2820 * 0 <= rts_low <= rts_high <= fifo_size
 2821 * Once their CTS line is asserted by the remote peer, some x86 UARTs tend
 2822 * to flush their internal TX FIFO, commonly up to 16 data, before
 2823 * they actually stop sending new data. So we try to set the RTS High
2824 * Threshold to a reasonably high value respecting this 16 data
2825 * empirical rule when possible.
2826 */
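/*
 * For example (illustrative only), with a 32-data RX FIFO this gives
 * rts_high = max(16, 32 - 16) = 16 and rts_low = max(8, 32 - 20) = 12.
 */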
2827 atmel_port->rts_high = max_t(int, atmel_port->fifo_size >> 1,
2828 atmel_port->fifo_size - ATMEL_RTS_HIGH_OFFSET);
2829 atmel_port->rts_low = max_t(int, atmel_port->fifo_size >> 2,
2830 atmel_port->fifo_size - ATMEL_RTS_LOW_OFFSET);
2831
2832 dev_info(&pdev->dev, "Using FIFO (%u data)\n",
2833 atmel_port->fifo_size);
2834 dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n",
2835 atmel_port->rts_high);
2836 dev_dbg(&pdev->dev, "RTS Low Threshold : %2u data\n",
2837 atmel_port->rts_low);
2838 }
2839
 2840 static int atmel_serial_probe(struct platform_device *pdev)
2841 {
2842 struct atmel_uart_port *atmel_port;
2843 struct device_node *np = pdev->dev.parent->of_node;
2844 void *data;
2845 int ret;
2846 bool rs485_enabled;
2847
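/*
 * The ring size must be a power of two so that the circular-buffer index
 * arithmetic (masking with size - 1) stays correct.
 */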
2848 BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
2849
2850 /*
 2851 * In the device tree there is no node with "atmel,at91rm9200-usart-serial"
 2852 * as its compatible string. This driver is probed by the at91-usart MFD
 2853 * driver, which is just a wrapper over the atmel_serial and
 2854 * spi-at91-usart drivers. All attributes needed by this driver are
 2855 * found in the of_node of the parent.
2856 */
2857 pdev->dev.of_node = np;
2858
2859 ret = of_alias_get_id(np, "serial");
2860 if (ret < 0)
 2861 /* port id not found in platform data or device-tree aliases:
2862 * auto-enumerate it */
2863 ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);
2864
2865 if (ret >= ATMEL_MAX_UART) {
2866 ret = -ENODEV;
2867 goto err;
2868 }
2869
2870 if (test_and_set_bit(ret, atmel_ports_in_use)) {
2871 /* port already in use */
2872 ret = -EBUSY;
2873 goto err;
2874 }
2875
2876 atmel_port = &atmel_ports[ret];
2877 atmel_port->backup_imr = 0;
2878 atmel_port->uart.line = ret;
2879 atmel_port->uart.has_sysrq = IS_ENABLED(CONFIG_SERIAL_ATMEL_CONSOLE);
2880 atmel_serial_probe_fifos(atmel_port, pdev);
2881
2882 atomic_set(&atmel_port->tasklet_shutdown, 0);
2883 spin_lock_init(&atmel_port->lock_suspended);
2884
2885 ret = atmel_init_port(atmel_port, pdev);
2886 if (ret)
2887 goto err_clear_bit;
2888
2889 atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0);
2890 if (IS_ERR(atmel_port->gpios)) {
2891 ret = PTR_ERR(atmel_port->gpios);
2892 goto err_clear_bit;
2893 }
2894
2895 if (!atmel_use_pdc_rx(&atmel_port->uart)) {
2896 ret = -ENOMEM;
2897 data = kmalloc_array(ATMEL_SERIAL_RINGSIZE,
2898 sizeof(struct atmel_uart_char),
2899 GFP_KERNEL);
2900 if (!data)
2901 goto err_alloc_ring;
2902 atmel_port->rx_ring.buf = data;
2903 }
2904
2905 rs485_enabled = atmel_port->uart.rs485.flags & SER_RS485_ENABLED;
2906
2907 ret = uart_add_one_port(&atmel_uart, &atmel_port->uart);
2908 if (ret)
2909 goto err_add_port;
2910
2911 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2912 if (uart_console(&atmel_port->uart)
2913 && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
2914 /*
2915 * The serial core enabled the clock for us, so undo
2916 * the clk_prepare_enable() in atmel_console_setup()
2917 */
2918 clk_disable_unprepare(atmel_port->clk);
2919 }
2920 #endif
2921
2922 device_init_wakeup(&pdev->dev, 1);
2923 platform_set_drvdata(pdev, atmel_port);
2924
2925 /*
2926 * The peripheral clock has been disabled by atmel_init_port():
2927 * enable it before accessing I/O registers
2928 */
2929 clk_prepare_enable(atmel_port->clk);
2930
2931 if (rs485_enabled) {
2932 atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR,
2933 ATMEL_US_USMODE_NORMAL);
2934 atmel_uart_writel(&atmel_port->uart, ATMEL_US_CR,
2935 ATMEL_US_RTSEN);
2936 }
2937
2938 /*
 2939 * Get the IP name (usart or uart)
2940 */
2941 atmel_get_ip_name(&atmel_port->uart);
2942
2943 /*
2944 * The peripheral clock can now safely be disabled till the port
2945 * is used
2946 */
2947 clk_disable_unprepare(atmel_port->clk);
2948
2949 return 0;
2950
2951 err_add_port:
2952 kfree(atmel_port->rx_ring.buf);
2953 atmel_port->rx_ring.buf = NULL;
2954 err_alloc_ring:
2955 if (!uart_console(&atmel_port->uart)) {
2956 clk_put(atmel_port->clk);
2957 atmel_port->clk = NULL;
2958 }
2959 err_clear_bit:
2960 clear_bit(atmel_port->uart.line, atmel_ports_in_use);
2961 err:
2962 return ret;
2963 }
2964
2965 /*
2966 * Even if the driver is not modular, it makes sense to be able to
2967 * unbind a device: there can be many bound devices, and there are
2968 * situations where dynamic binding and unbinding can be useful.
2969 *
2970 * For example, a connected device can require a specific firmware update
2971 * protocol that needs bitbanging on IO lines, but use the regular serial
2972 * port in the normal case.
2973 */
 2974 static int atmel_serial_remove(struct platform_device *pdev)
2975 {
2976 struct uart_port *port = platform_get_drvdata(pdev);
2977 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2978 int ret = 0;
2979
2980 tasklet_kill(&atmel_port->tasklet_rx);
2981 tasklet_kill(&atmel_port->tasklet_tx);
2982
2983 device_init_wakeup(&pdev->dev, 0);
2984
2985 ret = uart_remove_one_port(&atmel_uart, port);
2986
2987 kfree(atmel_port->rx_ring.buf);
2988
2989 /* "port" is allocated statically, so we shouldn't free it */
2990
2991 clear_bit(port->line, atmel_ports_in_use);
2992
2993 clk_put(atmel_port->clk);
2994 atmel_port->clk = NULL;
2995 pdev->dev.of_node = NULL;
2996
2997 return ret;
2998 }
2999
3000 static struct platform_driver atmel_serial_driver = {
3001 .probe = atmel_serial_probe,
3002 .remove = atmel_serial_remove,
3003 .suspend = atmel_serial_suspend,
3004 .resume = atmel_serial_resume,
3005 .driver = {
3006 .name = "atmel_usart_serial",
3007 .of_match_table = of_match_ptr(atmel_serial_dt_ids),
3008 },
3009 };
3010
 3011 static int __init atmel_serial_init(void)
3012 {
3013 int ret;
3014
3015 ret = uart_register_driver(&atmel_uart);
3016 if (ret)
3017 return ret;
3018
3019 ret = platform_driver_register(&atmel_serial_driver);
3020 if (ret)
3021 uart_unregister_driver(&atmel_uart);
3022
3023 return ret;
3024 }
3025 device_initcall(atmel_serial_init);
3026