1 /*
2 * Driver for Motorola IMX serial ports
3 *
4 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
5 *
6 * Author: Sascha Hauer <sascha@saschahauer.de>
7 * Copyright (C) 2004 Pengutronix
8 *
9 * Copyright (C) 2009 emlix GmbH
10 * Author: Fabian Godehardt (added IrDA support for iMX)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 *
26 * [29-Mar-2005] Mike Lee
27 * Added hardware handshake
28 */
29
30 #if defined(CONFIG_SERIAL_IMX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
31 #define SUPPORT_SYSRQ
32 #endif
33
34 #include <linux/module.h>
35 #include <linux/ioport.h>
36 #include <linux/init.h>
37 #include <linux/console.h>
38 #include <linux/sysrq.h>
39 #include <linux/platform_device.h>
40 #include <linux/tty.h>
41 #include <linux/tty_flip.h>
42 #include <linux/serial_core.h>
43 #include <linux/serial.h>
44 #include <linux/clk.h>
45 #include <linux/delay.h>
46 #include <linux/rational.h>
47 #include <linux/slab.h>
48 #include <linux/of.h>
49 #include <linux/of_device.h>
50 #include <linux/io.h>
51 #include <linux/dma-mapping.h>
52
53 #include <asm/irq.h>
54 #include <linux/platform_data/serial-imx.h>
55 #include <linux/platform_data/dma-imx.h>
56
57 /* Register definitions */
58 #define URXD0 0x0 /* Receiver Register */
59 #define URTX0 0x40 /* Transmitter Register */
60 #define UCR1 0x80 /* Control Register 1 */
61 #define UCR2 0x84 /* Control Register 2 */
62 #define UCR3 0x88 /* Control Register 3 */
63 #define UCR4 0x8c /* Control Register 4 */
64 #define UFCR 0x90 /* FIFO Control Register */
65 #define USR1 0x94 /* Status Register 1 */
66 #define USR2 0x98 /* Status Register 2 */
67 #define UESC 0x9c /* Escape Character Register */
68 #define UTIM 0xa0 /* Escape Timer Register */
69 #define UBIR 0xa4 /* BRM Incremental Register */
70 #define UBMR 0xa8 /* BRM Modulator Register */
71 #define UBRC 0xac /* Baud Rate Count Register */
72 #define IMX21_ONEMS 0xb0 /* One Millisecond register */
73 #define IMX1_UTS 0xd0 /* UART Test Register on i.mx1 */
74 #define IMX21_UTS 0xb4 /* UART Test Register on all other i.mx*/
75
76 /* UART Control Register Bit Fields.*/
77 #define URXD_CHARRDY (1<<15)
78 #define URXD_ERR (1<<14)
79 #define URXD_OVRRUN (1<<13)
80 #define URXD_FRMERR (1<<12)
81 #define URXD_BRK (1<<11)
82 #define URXD_PRERR (1<<10)
83 #define URXD_RX_DATA (0xFF<<0)
84 #define UCR1_ADEN (1<<15) /* Auto detect interrupt */
85 #define UCR1_ADBR (1<<14) /* Auto detect baud rate */
86 #define UCR1_TRDYEN (1<<13) /* Transmitter ready interrupt enable */
87 #define UCR1_IDEN (1<<12) /* Idle condition interrupt */
88 #define UCR1_ICD_REG(x) (((x) & 3) << 10) /* idle condition detect */
89 #define UCR1_RRDYEN (1<<9) /* Recv ready interrupt enable */
90 #define UCR1_RDMAEN (1<<8) /* Recv ready DMA enable */
91 #define UCR1_IREN (1<<7) /* Infrared interface enable */
92 #define UCR1_TXMPTYEN (1<<6) /* Transimitter empty interrupt enable */
93 #define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */
94 #define UCR1_SNDBRK (1<<4) /* Send break */
95 #define UCR1_TDMAEN (1<<3) /* Transmitter ready DMA enable */
96 #define IMX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, i.mx1 only */
97 #define UCR1_ATDMAEN (1<<2) /* Aging DMA Timer Enable */
98 #define UCR1_DOZE (1<<1) /* Doze */
99 #define UCR1_UARTEN (1<<0) /* UART enabled */
100 #define UCR2_ESCI (1<<15) /* Escape seq interrupt enable */
101 #define UCR2_IRTS (1<<14) /* Ignore RTS pin */
102 #define UCR2_CTSC (1<<13) /* CTS pin control */
103 #define UCR2_CTS (1<<12) /* Clear to send */
104 #define UCR2_ESCEN (1<<11) /* Escape enable */
105 #define UCR2_PREN (1<<8) /* Parity enable */
106 #define UCR2_PROE (1<<7) /* Parity odd/even */
107 #define UCR2_STPB (1<<6) /* Stop */
108 #define UCR2_WS (1<<5) /* Word size */
109 #define UCR2_RTSEN (1<<4) /* Request to send interrupt enable */
110 #define UCR2_ATEN (1<<3) /* Aging Timer Enable */
111 #define UCR2_TXEN (1<<2) /* Transmitter enabled */
112 #define UCR2_RXEN (1<<1) /* Receiver enabled */
113 #define UCR2_SRST (1<<0) /* SW reset */
114 #define UCR3_DTREN (1<<13) /* DTR interrupt enable */
115 #define UCR3_PARERREN (1<<12) /* Parity enable */
116 #define UCR3_FRAERREN (1<<11) /* Frame error interrupt enable */
117 #define UCR3_DSR (1<<10) /* Data set ready */
118 #define UCR3_DCD (1<<9) /* Data carrier detect */
119 #define UCR3_RI (1<<8) /* Ring indicator */
120 #define UCR3_ADNIMP (1<<7) /* Autobaud Detection Not Improved */
121 #define UCR3_RXDSEN (1<<6) /* Receive status interrupt enable */
122 #define UCR3_AIRINTEN (1<<5) /* Async IR wake interrupt enable */
123 #define UCR3_AWAKEN (1<<4) /* Async wake interrupt enable */
124 #define IMX21_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select */
125 #define UCR3_INVT (1<<1) /* Inverted Infrared transmission */
126 #define UCR3_BPEN (1<<0) /* Preset registers enable */
127 #define UCR4_CTSTL_SHF 10 /* CTS trigger level shift */
128 #define UCR4_CTSTL_MASK 0x3F /* CTS trigger is 6 bits wide */
129 #define UCR4_INVR (1<<9) /* Inverted infrared reception */
130 #define UCR4_ENIRI (1<<8) /* Serial infrared interrupt enable */
131 #define UCR4_WKEN (1<<7) /* Wake interrupt enable */
132 #define UCR4_REF16 (1<<6) /* Ref freq 16 MHz */
133 #define UCR4_IDDMAEN (1<<6) /* DMA IDLE Condition Detected */
134 #define UCR4_IRSC (1<<5) /* IR special case */
135 #define UCR4_TCEN (1<<3) /* Transmit complete interrupt enable */
136 #define UCR4_BKEN (1<<2) /* Break condition interrupt enable */
137 #define UCR4_OREN (1<<1) /* Receiver overrun interrupt enable */
138 #define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */
139 #define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */
140 #define UFCR_DCEDTE (1<<6) /* DCE/DTE mode select */
141 #define UFCR_RFDIV (7<<7) /* Reference freq divider mask */
142 #define UFCR_RFDIV_REG(x) (((x) < 7 ? 6 - (x) : 6) << 7)
143 #define UFCR_TXTL_SHF 10 /* Transmitter trigger level shift */
144 #define USR1_PARITYERR (1<<15) /* Parity error interrupt flag */
145 #define USR1_RTSS (1<<14) /* RTS pin status */
146 #define USR1_TRDY (1<<13) /* Transmitter ready interrupt/dma flag */
147 #define USR1_RTSD (1<<12) /* RTS delta */
148 #define USR1_ESCF (1<<11) /* Escape seq interrupt flag */
149 #define USR1_FRAMERR (1<<10) /* Frame error interrupt flag */
150 #define USR1_RRDY (1<<9) /* Receiver ready interrupt/dma flag */
151 #define USR1_TIMEOUT (1<<7) /* Receive timeout interrupt status */
152 #define USR1_RXDS (1<<6) /* Receiver idle interrupt flag */
153 #define USR1_AIRINT (1<<5) /* Async IR wake interrupt flag */
154 #define USR1_AWAKE (1<<4) /* Aysnc wake interrupt flag */
155 #define USR2_ADET (1<<15) /* Auto baud rate detect complete */
156 #define USR2_TXFE (1<<14) /* Transmit buffer FIFO empty */
157 #define USR2_DTRF (1<<13) /* DTR edge interrupt flag */
158 #define USR2_IDLE (1<<12) /* Idle condition */
159 #define USR2_IRINT (1<<8) /* Serial infrared interrupt flag */
160 #define USR2_WAKE (1<<7) /* Wake */
161 #define USR2_RTSF (1<<4) /* RTS edge interrupt flag */
162 #define USR2_TXDC (1<<3) /* Transmitter complete */
163 #define USR2_BRCD (1<<2) /* Break condition */
164 #define USR2_ORE (1<<1) /* Overrun error */
165 #define USR2_RDR (1<<0) /* Recv data ready */
166 #define UTS_FRCPERR (1<<13) /* Force parity error */
167 #define UTS_LOOP (1<<12) /* Loop tx and rx */
168 #define UTS_TXEMPTY (1<<6) /* TxFIFO empty */
169 #define UTS_RXEMPTY (1<<5) /* RxFIFO empty */
170 #define UTS_TXFULL (1<<4) /* TxFIFO full */
171 #define UTS_RXFULL (1<<3) /* RxFIFO full */
172 #define UTS_SOFTRST (1<<0) /* Software reset */
173
174 /* We've been assigned a range on the "Low-density serial ports" major */
175 #define SERIAL_IMX_MAJOR 207
176 #define MINOR_START 16
177 #define DEV_NAME "ttymxc"
178
179 /*
180 * This determines how often we check the modem status signals
181 * for any change. They generally aren't connected to an IRQ
182 * so we have to poll them. We also check immediately before
183 * filling the TX fifo incase CTS has been dropped.
184 */
185 #define MCTRL_TIMEOUT (250*HZ/1000)
186
187 #define DRIVER_NAME "IMX-uart"
188
189 #define UART_NR 8
190
191 /* i.mx21 type uart runs on all i.mx except i.mx1 */
enum imx_uart_type {
	IMX1_UART,	/* original i.MX1 register layout (UTS at 0xd0) */
	IMX21_UART,	/* i.MX21-style layout, used by later i.MX parts */
	IMX6Q_UART,	/* i.MX6Q (same UTS offset as i.MX21, see devdata) */
};
197
198 /* device type dependent stuff */
struct imx_uart_data {
	unsigned uts_reg;		/* offset of the UART Test Register */
	enum imx_uart_type devtype;	/* which register layout this SoC has */
};
203
/*
 * Per-port driver state.  The embedded struct uart_port must stay the
 * first member: the driver casts struct uart_port * back to
 * struct imx_port * throughout.
 */
struct imx_port {
	struct uart_port	port;
	struct timer_list	timer;		/* modem-status poll timer */
	unsigned int		old_status;	/* last get_mctrl() snapshot */
	int			txirq, rxirq, rtsirq;
	unsigned int		have_rtscts:1;
	unsigned int		dte_mode:1;
	unsigned int		use_irda:1;
	unsigned int		irda_inv_rx:1;
	unsigned int		irda_inv_tx:1;
	unsigned short		trcv_delay; /* transceiver delay */
	struct clk		*clk_ipg;
	struct clk		*clk_per;
	const struct imx_uart_data *devdata;

	/* DMA fields */
	unsigned int		dma_is_inited:1;
	unsigned int		dma_is_enabled:1;
	unsigned int		dma_is_rxing:1;
	unsigned int		dma_is_txing:1;
	struct dma_chan		*dma_chan_rx, *dma_chan_tx;
	struct scatterlist	rx_sgl, tx_sgl[2];	/* 2 TX entries for circ-buffer wrap */
	void			*rx_buf;		/* RX_BUF_SIZE bounce buffer */
	unsigned int		tx_bytes;		/* bytes in the current TX DMA */
	unsigned int		dma_tx_nents;		/* sg entries mapped for TX */
	wait_queue_head_t	dma_wait;		/* shutdown waits for DMA here */
};
231
/* Saved copies of UCR1..UCR3 for imx_port_ucrs_save()/restore(). */
struct imx_port_ucrs {
	unsigned int	ucr1;
	unsigned int	ucr2;
	unsigned int	ucr3;
};
237
238 #ifdef CONFIG_IRDA
239 #define USE_IRDA(sport) ((sport)->use_irda)
240 #else
241 #define USE_IRDA(sport) (0)
242 #endif
243
/* Per-devtype data, indexed by enum imx_uart_type. */
static struct imx_uart_data imx_uart_devdata[] = {
	[IMX1_UART] = {
		.uts_reg = IMX1_UTS,
		.devtype = IMX1_UART,
	},
	[IMX21_UART] = {
		.uts_reg = IMX21_UTS,
		.devtype = IMX21_UART,
	},
	[IMX6Q_UART] = {
		.uts_reg = IMX21_UTS,	/* i.MX6Q shares the i.MX21 UTS offset */
		.devtype = IMX6Q_UART,
	},
};
258
/* Platform-bus id table for non-DT probing; driver_data selects devdata. */
static struct platform_device_id imx_uart_devtype[] = {
	{
		.name = "imx1-uart",
		.driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX1_UART],
	}, {
		.name = "imx21-uart",
		.driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX21_UART],
	}, {
		.name = "imx6q-uart",
		.driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX6Q_UART],
	}, {
		/* sentinel */
	}
};
273 MODULE_DEVICE_TABLE(platform, imx_uart_devtype);
274
/* Device-tree match table; .data selects the per-devtype data. */
static struct of_device_id imx_uart_dt_ids[] = {
	{ .compatible = "fsl,imx6q-uart", .data = &imx_uart_devdata[IMX6Q_UART], },
	{ .compatible = "fsl,imx1-uart", .data = &imx_uart_devdata[IMX1_UART], },
	{ .compatible = "fsl,imx21-uart", .data = &imx_uart_devdata[IMX21_UART], },
	{ /* sentinel */ }
};
281 MODULE_DEVICE_TABLE(of, imx_uart_dt_ids);
282
uts_reg(struct imx_port * sport)283 static inline unsigned uts_reg(struct imx_port *sport)
284 {
285 return sport->devdata->uts_reg;
286 }
287
is_imx1_uart(struct imx_port * sport)288 static inline int is_imx1_uart(struct imx_port *sport)
289 {
290 return sport->devdata->devtype == IMX1_UART;
291 }
292
is_imx21_uart(struct imx_port * sport)293 static inline int is_imx21_uart(struct imx_port *sport)
294 {
295 return sport->devdata->devtype == IMX21_UART;
296 }
297
is_imx6q_uart(struct imx_port * sport)298 static inline int is_imx6q_uart(struct imx_port *sport)
299 {
300 return sport->devdata->devtype == IMX6Q_UART;
301 }
302 /*
303 * Save and restore functions for UCR1, UCR2 and UCR3 registers
304 */
305 #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_IMX_CONSOLE)
imx_port_ucrs_save(struct uart_port * port,struct imx_port_ucrs * ucr)306 static void imx_port_ucrs_save(struct uart_port *port,
307 struct imx_port_ucrs *ucr)
308 {
309 /* save control registers */
310 ucr->ucr1 = readl(port->membase + UCR1);
311 ucr->ucr2 = readl(port->membase + UCR2);
312 ucr->ucr3 = readl(port->membase + UCR3);
313 }
314
imx_port_ucrs_restore(struct uart_port * port,struct imx_port_ucrs * ucr)315 static void imx_port_ucrs_restore(struct uart_port *port,
316 struct imx_port_ucrs *ucr)
317 {
318 /* restore control registers */
319 writel(ucr->ucr1, port->membase + UCR1);
320 writel(ucr->ucr2, port->membase + UCR2);
321 writel(ucr->ucr3, port->membase + UCR3);
322 }
323 #endif
324
325 /*
326 * Handle any change of modem status signal since we were last called.
327 */
static void imx_mctrl_check(struct imx_port *sport)
{
	unsigned int status, changed;

	/* Diff the current modem signals against the last snapshot. */
	status = sport->port.ops->get_mctrl(&sport->port);
	changed = status ^ sport->old_status;

	if (changed == 0)
		return;

	sport->old_status = status;

	/* Count each transition and forward DCD/CTS to the serial core. */
	if (changed & TIOCM_RI)
		sport->port.icount.rng++;
	if (changed & TIOCM_DSR)
		sport->port.icount.dsr++;
	if (changed & TIOCM_CAR)
		uart_handle_dcd_change(&sport->port, status & TIOCM_CAR);
	if (changed & TIOCM_CTS)
		uart_handle_cts_change(&sport->port, status & TIOCM_CTS);

	/* Wake anyone sleeping in TIOCMIWAIT on this port. */
	wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
}
351
352 /*
353 * This is our per-port timeout handler, for checking the
354 * modem status signals.
355 */
static void imx_timeout(unsigned long data)
{
	struct imx_port *sport = (struct imx_port *)data;
	unsigned long flags;

	/* The port may already be closed; only poll while state exists. */
	if (sport->port.state) {
		spin_lock_irqsave(&sport->port.lock, flags);
		imx_mctrl_check(sport);
		spin_unlock_irqrestore(&sport->port.lock, flags);

		/* Re-arm ourselves for the next poll interval. */
		mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
	}
}
369
370 /*
371 * interrupts disabled on entry
372 */
static void imx_stop_tx(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long temp;

	if (USE_IRDA(sport)) {
		/* half duplex - wait for end of transmission */
		int n = 256;
		/* Bounded busy-wait (up to 256 * 5us) for TX complete. */
		while ((--n > 0) &&
		      !(readl(sport->port.membase + USR2) & USR2_TXDC)) {
			udelay(5);
			barrier();
		}
		/*
		 * irda transceiver - wait a bit more to avoid
		 * cutoff, hardware dependent
		 */
		udelay(sport->trcv_delay);

		/*
		 * half duplex - reactivate receive mode,
		 * flush receive pipe echo crap
		 */
		if (readl(sport->port.membase + USR2) & USR2_TXDC) {
			/* Mask TX-side interrupts first ... */
			temp = readl(sport->port.membase + UCR1);
			temp &= ~(UCR1_TXMPTYEN | UCR1_TRDYEN);
			writel(temp, sport->port.membase + UCR1);

			temp = readl(sport->port.membase + UCR4);
			temp &= ~(UCR4_TCEN);
			writel(temp, sport->port.membase + UCR4);

			/* ... drain any echoed characters from the RX FIFO ... */
			while (readl(sport->port.membase + URXD0) &
			       URXD_CHARRDY)
				barrier();

			/* ... then re-enable the receive interrupts. */
			temp = readl(sport->port.membase + UCR1);
			temp |= UCR1_RRDYEN;
			writel(temp, sport->port.membase + UCR1);

			temp = readl(sport->port.membase + UCR4);
			temp |= UCR4_DREN;
			writel(temp, sport->port.membase + UCR4);
		}
		return;
	}

	/*
	 * We are maybe in the SMP context, so if the DMA TX thread is running
	 * on other cpu, we have to wait for it to finish.
	 */
	if (sport->dma_is_enabled && sport->dma_is_txing)
		return;

	/* Non-DMA path: just mask the transmitter-empty interrupt. */
	temp = readl(sport->port.membase + UCR1);
	writel(temp & ~UCR1_TXMPTYEN, sport->port.membase + UCR1);
}
430
431 /*
432 * interrupts disabled on entry
433 */
static void imx_stop_rx(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long temp;

	if (sport->dma_is_enabled && sport->dma_is_rxing) {
		if (sport->port.suspended) {
			/* Suspending: cancel the in-flight RX DMA now. */
			dmaengine_terminate_all(sport->dma_chan_rx);
			sport->dma_is_rxing = 0;
		} else {
			/* Let the running RX DMA complete first. */
			return;
		}
	}

	/* Disable the receiver itself ... */
	temp = readl(sport->port.membase + UCR2);
	writel(temp & ~UCR2_RXEN, sport->port.membase + UCR2);

	/* ... and the `Receiver Ready Interrupt`. */
	temp = readl(sport->port.membase + UCR1);
	writel(temp & ~UCR1_RRDYEN, sport->port.membase + UCR1);
}
455
456 /*
457 * Set the modem control timer to fire immediately.
458 */
imx_enable_ms(struct uart_port * port)459 static void imx_enable_ms(struct uart_port *port)
460 {
461 struct imx_port *sport = (struct imx_port *)port;
462
463 mod_timer(&sport->timer, jiffies);
464 }
465
static inline void imx_transmit_buffer(struct imx_port *sport)
{
	struct circ_buf *xmit = &sport->port.state->xmit;

	/* A pending XON/XOFF-style character takes priority. */
	if (sport->port.x_char) {
		/* Send next char */
		writel(sport->port.x_char, sport->port.membase + URTX0);
		return;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
		imx_stop_tx(&sport->port);
		return;
	}

	/* Feed the TX FIFO until it is full or the circ buffer empties. */
	while (!uart_circ_empty(xmit) &&
	       !(readl(sport->port.membase + uts_reg(sport)) & UTS_TXFULL)) {
		/* send xmit->buf[xmit->tail]
		 * out the port here */
		writel(xmit->buf[xmit->tail], sport->port.membase + URTX0);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		sport->port.icount.tx++;
	}

	/* Ask for more data once the buffer drops below the threshold. */
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&sport->port);

	if (uart_circ_empty(xmit))
		imx_stop_tx(&sport->port);
}
496
/* Completion callback for a TX DMA transfer started by imx_dma_tx(). */
static void dma_tx_callback(void *data)
{
	struct imx_port *sport = data;
	struct scatterlist *sgl = &sport->tx_sgl[0];
	struct circ_buf *xmit = &sport->port.state->xmit;
	unsigned long flags;

	dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);

	sport->dma_is_txing = 0;

	/* update the stat */
	spin_lock_irqsave(&sport->port.lock, flags);
	xmit->tail = (xmit->tail + sport->tx_bytes) & (UART_XMIT_SIZE - 1);
	sport->port.icount.tx += sport->tx_bytes;
	spin_unlock_irqrestore(&sport->port.lock, flags);

	dev_dbg(sport->port.dev, "we finish the TX DMA.\n");

	uart_write_wakeup(&sport->port);

	/* A shutdown may be waiting in dma_wait for us to finish. */
	if (waitqueue_active(&sport->dma_wait)) {
		wake_up(&sport->dma_wait);
		dev_dbg(sport->port.dev, "exit in %s.\n", __func__);
		return;
	}
}
524
/* Start a DMA transfer for everything pending in the TX circ buffer. */
static void imx_dma_tx(struct imx_port *sport)
{
	struct circ_buf *xmit = &sport->port.state->xmit;
	struct scatterlist *sgl = sport->tx_sgl;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan = sport->dma_chan_tx;
	struct device *dev = sport->port.dev;
	enum dma_status status;
	int ret;

	/* Bail if a previous TX DMA is still running. */
	status = dmaengine_tx_status(chan, (dma_cookie_t)0, NULL);
	if (DMA_IN_PROGRESS == status)
		return;

	sport->tx_bytes = uart_circ_chars_pending(xmit);

	/* Wrapped data needs two sg entries: tail..end, then start..head. */
	if (xmit->tail > xmit->head && xmit->head > 0) {
		sport->dma_tx_nents = 2;
		sg_init_table(sgl, 2);
		sg_set_buf(sgl, xmit->buf + xmit->tail,
				UART_XMIT_SIZE - xmit->tail);
		sg_set_buf(sgl + 1, xmit->buf, xmit->head);
	} else {
		sport->dma_tx_nents = 1;
		sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
	}

	ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
	if (ret == 0) {
		dev_err(dev, "DMA mapping error for TX.\n");
		return;
	}
	desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents,
					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "We cannot prepare for the TX slave dma!\n");
		return;
	}
	desc->callback = dma_tx_callback;
	desc->callback_param = sport;

	dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n",
			uart_circ_chars_pending(xmit));
	/* fire it */
	sport->dma_is_txing = 1;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return;
}
574
575 /*
576 * interrupts disabled on entry
577 */
static void imx_start_tx(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long temp;

	if (USE_IRDA(sport)) {
		/* half duplex in IrDA mode; have to disable receive mode */
		temp = readl(sport->port.membase + UCR4);
		temp &= ~(UCR4_DREN);
		writel(temp, sport->port.membase + UCR4);

		temp = readl(sport->port.membase + UCR1);
		temp &= ~(UCR1_RRDYEN);
		writel(temp, sport->port.membase + UCR1);
	}

	/* PIO mode: unmask the transmitter-empty interrupt. */
	if (!sport->dma_is_enabled) {
		temp = readl(sport->port.membase + UCR1);
		writel(temp | UCR1_TXMPTYEN, sport->port.membase + UCR1);
	}

	if (USE_IRDA(sport)) {
		/* Enable TX-ready and TX-complete interrupts for IrDA. */
		temp = readl(sport->port.membase + UCR1);
		temp |= UCR1_TRDYEN;
		writel(temp, sport->port.membase + UCR1);

		temp = readl(sport->port.membase + UCR4);
		temp |= UCR4_TCEN;
		writel(temp, sport->port.membase + UCR4);
	}

	if (sport->dma_is_enabled) {
		/* FIXME: port->x_char must be transmitted if != 0 */
		if (!uart_circ_empty(&port->state->xmit) &&
		    !uart_tx_stopped(port))
			imx_dma_tx(sport);
		return;
	}

	/* Prime the FIFO right away if it is currently empty. */
	if (readl(sport->port.membase + uts_reg(sport)) & UTS_TXEMPTY)
		imx_transmit_buffer(sport);
}
620
/* RTS-delta interrupt: report the new CTS state to the serial core. */
static irqreturn_t imx_rtsint(int irq, void *dev_id)
{
	struct imx_port *sport = dev_id;
	unsigned int val;
	unsigned long flags;

	spin_lock_irqsave(&sport->port.lock, flags);

	/* Ack the delta flag (write-1-to-clear), then sample the pin. */
	writel(USR1_RTSD, sport->port.membase + USR1);
	val = readl(sport->port.membase + USR1) & USR1_RTSS;
	uart_handle_cts_change(&sport->port, !!val);
	wake_up_interruptible(&sport->port.state->port.delta_msr_wait);

	spin_unlock_irqrestore(&sport->port.lock, flags);
	return IRQ_HANDLED;
}
637
imx_txint(int irq,void * dev_id)638 static irqreturn_t imx_txint(int irq, void *dev_id)
639 {
640 struct imx_port *sport = dev_id;
641 unsigned long flags;
642
643 spin_lock_irqsave(&sport->port.lock, flags);
644 imx_transmit_buffer(sport);
645 spin_unlock_irqrestore(&sport->port.lock, flags);
646 return IRQ_HANDLED;
647 }
648
imx_rxint(int irq,void * dev_id)649 static irqreturn_t imx_rxint(int irq, void *dev_id)
650 {
651 struct imx_port *sport = dev_id;
652 unsigned int rx, flg, ignored = 0;
653 struct tty_port *port = &sport->port.state->port;
654 unsigned long flags, temp;
655
656 spin_lock_irqsave(&sport->port.lock, flags);
657
658 while (readl(sport->port.membase + USR2) & USR2_RDR) {
659 flg = TTY_NORMAL;
660 sport->port.icount.rx++;
661
662 rx = readl(sport->port.membase + URXD0);
663
664 temp = readl(sport->port.membase + USR2);
665 if (temp & USR2_BRCD) {
666 writel(USR2_BRCD, sport->port.membase + USR2);
667 if (uart_handle_break(&sport->port))
668 continue;
669 }
670
671 if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
672 continue;
673
674 if (unlikely(rx & URXD_ERR)) {
675 if (rx & URXD_BRK)
676 sport->port.icount.brk++;
677 else if (rx & URXD_PRERR)
678 sport->port.icount.parity++;
679 else if (rx & URXD_FRMERR)
680 sport->port.icount.frame++;
681 if (rx & URXD_OVRRUN)
682 sport->port.icount.overrun++;
683
684 if (rx & sport->port.ignore_status_mask) {
685 if (++ignored > 100)
686 goto out;
687 continue;
688 }
689
690 rx &= sport->port.read_status_mask;
691
692 if (rx & URXD_BRK)
693 flg = TTY_BREAK;
694 else if (rx & URXD_PRERR)
695 flg = TTY_PARITY;
696 else if (rx & URXD_FRMERR)
697 flg = TTY_FRAME;
698 if (rx & URXD_OVRRUN)
699 flg = TTY_OVERRUN;
700
701 #ifdef SUPPORT_SYSRQ
702 sport->port.sysrq = 0;
703 #endif
704 }
705
706 tty_insert_flip_char(port, rx, flg);
707 }
708
709 out:
710 spin_unlock_irqrestore(&sport->port.lock, flags);
711 tty_flip_buffer_push(port);
712 return IRQ_HANDLED;
713 }
714
715 static int start_rx_dma(struct imx_port *sport);
716 /*
717 * If the RXFIFO is filled with some data, and then we
718 * arise a DMA operation to receive them.
719 */
static void imx_dma_rxint(struct imx_port *sport)
{
	unsigned long temp;

	/* Only start a new RX DMA when data is ready and none is running. */
	temp = readl(sport->port.membase + USR2);
	if ((temp & USR2_RDR) && !sport->dma_is_rxing) {
		sport->dma_is_rxing = 1;

		/* disable the `Receiver Ready Interrupt` while DMA runs */
		temp = readl(sport->port.membase + UCR1);
		temp &= ~(UCR1_RRDYEN);
		writel(temp, sport->port.membase + UCR1);

		/* tell the DMA to receive the data. */
		start_rx_dma(sport);
	}
}
737
/* Combined interrupt handler: demultiplexes RX, TX, RTS and status events. */
static irqreturn_t imx_int(int irq, void *dev_id)
{
	struct imx_port *sport = dev_id;
	unsigned int sts;
	unsigned int sts2;

	sts = readl(sport->port.membase + USR1);

	/* Receiver ready: hand off to DMA or PIO path. */
	if (sts & USR1_RRDY) {
		if (sport->dma_is_enabled)
			imx_dma_rxint(sport);
		else
			imx_rxint(irq, dev_id);
	}

	/* Only service TX when the TX interrupt is actually enabled. */
	if (sts & USR1_TRDY &&
			readl(sport->port.membase + UCR1) & UCR1_TXMPTYEN)
		imx_txint(irq, dev_id);

	if (sts & USR1_RTSD)
		imx_rtsint(irq, dev_id);

	/* Ack an async wake event (write-1-to-clear). */
	if (sts & USR1_AWAKE)
		writel(USR1_AWAKE, sport->port.membase + USR1);

	sts2 = readl(sport->port.membase + USR2);
	if (sts2 & USR2_ORE) {
		dev_err(sport->port.dev, "Rx FIFO overrun\n");
		sport->port.icount.overrun++;
		writel(USR2_ORE, sport->port.membase + USR2);
	}

	return IRQ_HANDLED;
}
772
773 /*
774 * Return TIOCSER_TEMT when transmitter is not busy.
775 */
imx_tx_empty(struct uart_port * port)776 static unsigned int imx_tx_empty(struct uart_port *port)
777 {
778 struct imx_port *sport = (struct imx_port *)port;
779 unsigned int ret;
780
781 ret = (readl(sport->port.membase + USR2) & USR2_TXDC) ? TIOCSER_TEMT : 0;
782
783 /* If the TX DMA is working, return 0. */
784 if (sport->dma_is_enabled && sport->dma_is_txing)
785 ret = 0;
786
787 return ret;
788 }
789
790 /*
791 * We have a modem side uart, so the meanings of RTS and CTS are inverted.
792 */
imx_get_mctrl(struct uart_port * port)793 static unsigned int imx_get_mctrl(struct uart_port *port)
794 {
795 struct imx_port *sport = (struct imx_port *)port;
796 unsigned int tmp = TIOCM_DSR | TIOCM_CAR;
797
798 if (readl(sport->port.membase + USR1) & USR1_RTSS)
799 tmp |= TIOCM_CTS;
800
801 if (readl(sport->port.membase + UCR2) & UCR2_CTS)
802 tmp |= TIOCM_RTS;
803
804 if (readl(sport->port.membase + uts_reg(sport)) & UTS_LOOP)
805 tmp |= TIOCM_LOOP;
806
807 return tmp;
808 }
809
imx_set_mctrl(struct uart_port * port,unsigned int mctrl)810 static void imx_set_mctrl(struct uart_port *port, unsigned int mctrl)
811 {
812 struct imx_port *sport = (struct imx_port *)port;
813 unsigned long temp;
814
815 temp = readl(sport->port.membase + UCR2) & ~(UCR2_CTS | UCR2_CTSC);
816 if (mctrl & TIOCM_RTS)
817 temp |= UCR2_CTS | UCR2_CTSC;
818
819 writel(temp, sport->port.membase + UCR2);
820
821 temp = readl(sport->port.membase + uts_reg(sport)) & ~UTS_LOOP;
822 if (mctrl & TIOCM_LOOP)
823 temp |= UTS_LOOP;
824 writel(temp, sport->port.membase + uts_reg(sport));
825 }
826
827 /*
828 * Interrupts always disabled.
829 */
imx_break_ctl(struct uart_port * port,int break_state)830 static void imx_break_ctl(struct uart_port *port, int break_state)
831 {
832 struct imx_port *sport = (struct imx_port *)port;
833 unsigned long flags, temp;
834
835 spin_lock_irqsave(&sport->port.lock, flags);
836
837 temp = readl(sport->port.membase + UCR1) & ~UCR1_SNDBRK;
838
839 if (break_state != 0)
840 temp |= UCR1_SNDBRK;
841
842 writel(temp, sport->port.membase + UCR1);
843
844 spin_unlock_irqrestore(&sport->port.lock, flags);
845 }
846
847 #define TXTL 2 /* reset default */
848 #define RXTL 1 /* reset default */
849
imx_setup_ufcr(struct imx_port * sport,unsigned int mode)850 static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
851 {
852 unsigned int val;
853
854 /* set receiver / transmitter trigger level */
855 val = readl(sport->port.membase + UFCR) & (UFCR_RFDIV | UFCR_DCEDTE);
856 val |= TXTL << UFCR_TXTL_SHF | RXTL;
857 writel(val, sport->port.membase + UFCR);
858 return 0;
859 }
860
861 #define RX_BUF_SIZE (PAGE_SIZE)
/* Called when RX DMA goes idle: fall back to interrupt-driven RX. */
static void imx_rx_dma_done(struct imx_port *sport)
{
	unsigned long temp;

	/* Enable this interrupt when the RXFIFO is empty. */
	temp = readl(sport->port.membase + UCR1);
	temp |= UCR1_RRDYEN;
	writel(temp, sport->port.membase + UCR1);

	sport->dma_is_rxing = 0;

	/* Is the shutdown waiting for us? */
	if (waitqueue_active(&sport->dma_wait))
		wake_up(&sport->dma_wait);
}
877
878 /*
879 * There are three kinds of RX DMA interrupts(such as in the MX6Q):
880 * [1] the RX DMA buffer is full.
881 * [2] the Aging timer expires(wait for 8 bytes long)
882 * [3] the Idle Condition Detect(enabled the UCR4_IDDMAEN).
883 *
884 * The [2] is trigger when a character was been sitting in the FIFO
885 * meanwhile [3] can wait for 32 bytes long when the RX line is
886 * on IDLE state and RxFIFO is empty.
887 */
static void dma_rx_callback(void *data)
{
	struct imx_port *sport = data;
	struct dma_chan	*chan = sport->dma_chan_rx;
	struct scatterlist *sgl = &sport->rx_sgl;
	struct tty_port *port = &sport->port.state->port;
	struct dma_tx_state state;
	enum dma_status status;
	unsigned int count;

	/* unmap it first */
	dma_unmap_sg(sport->port.dev, sgl, 1, DMA_FROM_DEVICE);

	/* Received byte count = buffer size minus what the DMA left over. */
	status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state);
	count = RX_BUF_SIZE - state.residue;

	if (readl(sport->port.membase + USR2) & USR2_IDLE) {
		/* In condition [3] the SDMA counted up too early */
		count--;

		/* Ack the idle condition (write-1-to-clear). */
		writel(USR2_IDLE, sport->port.membase + USR2);
	}

	dev_dbg(sport->port.dev, "We get %d bytes.\n", count);

	if (count) {
		/* Hand the data to the tty layer and re-arm the DMA. */
		tty_insert_flip_string(port, sport->rx_buf, count);
		tty_flip_buffer_push(port);

		start_rx_dma(sport);
	} else
		imx_rx_dma_done(sport);
}
921
/* Map the RX bounce buffer and submit one DEV_TO_MEM DMA transfer. */
static int start_rx_dma(struct imx_port *sport)
{
	struct scatterlist *sgl = &sport->rx_sgl;
	struct dma_chan	*chan = sport->dma_chan_rx;
	struct device *dev = sport->port.dev;
	struct dma_async_tx_descriptor *desc;
	int ret;

	sg_init_one(sgl, sport->rx_buf, RX_BUF_SIZE);
	ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE);
	if (ret == 0) {
		dev_err(dev, "DMA mapping error for RX.\n");
		return -EINVAL;
	}
	desc = dmaengine_prep_slave_sg(chan, sgl, 1, DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "We cannot prepare for the RX slave dma!\n");
		return -EINVAL;
	}
	/* Completion is handled in dma_rx_callback(). */
	desc->callback = dma_rx_callback;
	desc->callback_param = sport;

	dev_dbg(dev, "RX: prepare for the DMA.\n");
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}
950
imx_uart_dma_exit(struct imx_port * sport)951 static void imx_uart_dma_exit(struct imx_port *sport)
952 {
953 if (sport->dma_chan_rx) {
954 dma_release_channel(sport->dma_chan_rx);
955 sport->dma_chan_rx = NULL;
956
957 kfree(sport->rx_buf);
958 sport->rx_buf = NULL;
959 }
960
961 if (sport->dma_chan_tx) {
962 dma_release_channel(sport->dma_chan_tx);
963 sport->dma_chan_tx = NULL;
964 }
965
966 sport->dma_is_inited = 0;
967 }
968
/*
 * Acquire and configure the RX and TX slave DMA channels for this port.
 *
 * Programs each channel with the matching FIFO register address, a
 * one-byte bus width and the FIFO trigger level as burst size, and
 * allocates a one-page RX bounce buffer.  On any failure everything
 * acquired so far is torn down via imx_uart_dma_exit().
 *
 * Returns 0 on success or a negative errno.
 */
static int imx_uart_dma_init(struct imx_port *sport)
{
	struct dma_slave_config slave_config = {};
	struct device *dev = sport->port.dev;
	int ret;

	/* Prepare for RX : */
	sport->dma_chan_rx = dma_request_slave_channel(dev, "rx");
	if (!sport->dma_chan_rx) {
		dev_dbg(dev, "cannot get the DMA channel.\n");
		ret = -EINVAL;
		goto err;
	}

	/* DMA reads single bytes from the RX data register. */
	slave_config.direction = DMA_DEV_TO_MEM;
	slave_config.src_addr = sport->port.mapbase + URXD0;
	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.src_maxburst = RXTL;
	ret = dmaengine_slave_config(sport->dma_chan_rx, &slave_config);
	if (ret) {
		dev_err(dev, "error in RX dma configuration.\n");
		goto err;
	}

	/*
	 * One page of RX bounce buffer; start_rx_dma() maps RX_BUF_SIZE
	 * of it — assumes RX_BUF_SIZE <= PAGE_SIZE (TODO confirm).
	 */
	sport->rx_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!sport->rx_buf) {
		dev_err(dev, "cannot alloc DMA buffer.\n");
		ret = -ENOMEM;
		goto err;
	}

	/* Prepare for TX : */
	sport->dma_chan_tx = dma_request_slave_channel(dev, "tx");
	if (!sport->dma_chan_tx) {
		dev_err(dev, "cannot get the TX DMA channel!\n");
		ret = -EINVAL;
		goto err;
	}

	/* slave_config is reused; only the dst_* fields matter for TX. */
	slave_config.direction = DMA_MEM_TO_DEV;
	slave_config.dst_addr = sport->port.mapbase + URTX0;
	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.dst_maxburst = TXTL;
	ret = dmaengine_slave_config(sport->dma_chan_tx, &slave_config);
	if (ret) {
		dev_err(dev, "error in TX dma configuration.");
		goto err;
	}

	sport->dma_is_inited = 1;

	return 0;
err:
	imx_uart_dma_exit(sport);
	return ret;
}
1025
imx_enable_dma(struct imx_port * sport)1026 static void imx_enable_dma(struct imx_port *sport)
1027 {
1028 unsigned long temp;
1029
1030 init_waitqueue_head(&sport->dma_wait);
1031
1032 /* set UCR1 */
1033 temp = readl(sport->port.membase + UCR1);
1034 temp |= UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN |
1035 /* wait for 32 idle frames for IDDMA interrupt */
1036 UCR1_ICD_REG(3);
1037 writel(temp, sport->port.membase + UCR1);
1038
1039 /* set UCR4 */
1040 temp = readl(sport->port.membase + UCR4);
1041 temp |= UCR4_IDDMAEN;
1042 writel(temp, sport->port.membase + UCR4);
1043
1044 sport->dma_is_enabled = 1;
1045 }
1046
imx_disable_dma(struct imx_port * sport)1047 static void imx_disable_dma(struct imx_port *sport)
1048 {
1049 unsigned long temp;
1050
1051 /* clear UCR1 */
1052 temp = readl(sport->port.membase + UCR1);
1053 temp &= ~(UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN);
1054 writel(temp, sport->port.membase + UCR1);
1055
1056 /* clear UCR2 */
1057 temp = readl(sport->port.membase + UCR2);
1058 temp &= ~(UCR2_CTSC | UCR2_CTS);
1059 writel(temp, sport->port.membase + UCR2);
1060
1061 /* clear UCR4 */
1062 temp = readl(sport->port.membase + UCR4);
1063 temp &= ~UCR4_IDDMAEN;
1064 writel(temp, sport->port.membase + UCR4);
1065
1066 sport->dma_is_enabled = 0;
1067 }
1068
/* CTS trigger level: half the 32-byte RX FIFO */
1070 #define CTSTL 16
1071
imx_startup(struct uart_port * port)1072 static int imx_startup(struct uart_port *port)
1073 {
1074 struct imx_port *sport = (struct imx_port *)port;
1075 int retval, i;
1076 unsigned long flags, temp;
1077
1078 retval = clk_prepare_enable(sport->clk_per);
1079 if (retval)
1080 goto error_out1;
1081 retval = clk_prepare_enable(sport->clk_ipg);
1082 if (retval) {
1083 clk_disable_unprepare(sport->clk_per);
1084 goto error_out1;
1085 }
1086
1087 imx_setup_ufcr(sport, 0);
1088
1089 /* disable the DREN bit (Data Ready interrupt enable) before
1090 * requesting IRQs
1091 */
1092 temp = readl(sport->port.membase + UCR4);
1093
1094 if (USE_IRDA(sport))
1095 temp |= UCR4_IRSC;
1096
1097 /* set the trigger level for CTS */
1098 temp &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);
1099 temp |= CTSTL << UCR4_CTSTL_SHF;
1100
1101 writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
1102
1103 /* Reset fifo's and state machines */
1104 i = 100;
1105
1106 temp = readl(sport->port.membase + UCR2);
1107 temp &= ~UCR2_SRST;
1108 writel(temp, sport->port.membase + UCR2);
1109
1110 while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) && (--i > 0))
1111 udelay(1);
1112
1113 /*
1114 * Allocate the IRQ(s) i.MX1 has three interrupts whereas later
1115 * chips only have one interrupt.
1116 */
1117 if (sport->txirq > 0) {
1118 retval = request_irq(sport->rxirq, imx_rxint, 0,
1119 dev_name(port->dev), sport);
1120 if (retval)
1121 goto error_out1;
1122
1123 retval = request_irq(sport->txirq, imx_txint, 0,
1124 dev_name(port->dev), sport);
1125 if (retval)
1126 goto error_out2;
1127
1128 /* do not use RTS IRQ on IrDA */
1129 if (!USE_IRDA(sport)) {
1130 retval = request_irq(sport->rtsirq, imx_rtsint, 0,
1131 dev_name(port->dev), sport);
1132 if (retval)
1133 goto error_out3;
1134 }
1135 } else {
1136 retval = request_irq(sport->port.irq, imx_int, 0,
1137 dev_name(port->dev), sport);
1138 if (retval) {
1139 free_irq(sport->port.irq, sport);
1140 goto error_out1;
1141 }
1142 }
1143
1144 spin_lock_irqsave(&sport->port.lock, flags);
1145
1146 /*
1147 * Finally, clear and enable interrupts
1148 */
1149 writel(USR1_RTSD, sport->port.membase + USR1);
1150 writel(USR2_ORE, sport->port.membase + USR2);
1151
1152 temp = readl(sport->port.membase + UCR1);
1153 temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
1154
1155 if (USE_IRDA(sport)) {
1156 temp |= UCR1_IREN;
1157 temp &= ~(UCR1_RTSDEN);
1158 }
1159
1160 writel(temp, sport->port.membase + UCR1);
1161
1162 temp = readl(sport->port.membase + UCR4);
1163 temp |= UCR4_OREN;
1164 writel(temp, sport->port.membase + UCR4);
1165
1166 temp = readl(sport->port.membase + UCR2);
1167 temp |= (UCR2_RXEN | UCR2_TXEN);
1168 if (!sport->have_rtscts)
1169 temp |= UCR2_IRTS;
1170 writel(temp, sport->port.membase + UCR2);
1171
1172 if (!is_imx1_uart(sport)) {
1173 temp = readl(sport->port.membase + UCR3);
1174 temp |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP;
1175 writel(temp, sport->port.membase + UCR3);
1176 }
1177
1178 if (USE_IRDA(sport)) {
1179 temp = readl(sport->port.membase + UCR4);
1180 if (sport->irda_inv_rx)
1181 temp |= UCR4_INVR;
1182 else
1183 temp &= ~(UCR4_INVR);
1184 writel(temp | UCR4_DREN, sport->port.membase + UCR4);
1185
1186 temp = readl(sport->port.membase + UCR3);
1187 if (sport->irda_inv_tx)
1188 temp |= UCR3_INVT;
1189 else
1190 temp &= ~(UCR3_INVT);
1191 writel(temp, sport->port.membase + UCR3);
1192 }
1193
1194 /*
1195 * Enable modem status interrupts
1196 */
1197 imx_enable_ms(&sport->port);
1198 spin_unlock_irqrestore(&sport->port.lock, flags);
1199
1200 if (USE_IRDA(sport)) {
1201 struct imxuart_platform_data *pdata;
1202 pdata = dev_get_platdata(sport->port.dev);
1203 sport->irda_inv_rx = pdata->irda_inv_rx;
1204 sport->irda_inv_tx = pdata->irda_inv_tx;
1205 sport->trcv_delay = pdata->transceiver_delay;
1206 if (pdata->irda_enable)
1207 pdata->irda_enable(1);
1208 }
1209
1210 return 0;
1211
1212 error_out3:
1213 if (sport->txirq)
1214 free_irq(sport->txirq, sport);
1215 error_out2:
1216 if (sport->rxirq)
1217 free_irq(sport->rxirq, sport);
1218 error_out1:
1219 return retval;
1220 }
1221
/*
 * Bring the port down (counterpart of imx_startup()): quiesce and tear
 * down DMA, disable the transmitter, switch off the IrDA transceiver,
 * stop the RTS polling timer, free the interrupt(s), mask all UART
 * interrupts and finally gate the clocks.
 */
static void imx_shutdown(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long temp;
	unsigned long flags;

	if (sport->dma_is_enabled) {
		int ret;

		/* We have to wait for the DMA to finish. */
		ret = wait_event_interruptible(sport->dma_wait,
			!sport->dma_is_rxing && !sport->dma_is_txing);
		if (ret != 0) {
			/* interrupted by a signal: force both directions idle */
			sport->dma_is_rxing = 0;
			sport->dma_is_txing = 0;
			dmaengine_terminate_all(sport->dma_chan_tx);
			dmaengine_terminate_all(sport->dma_chan_rx);
		}
		imx_stop_tx(port);
		imx_stop_rx(port);
		imx_disable_dma(sport);
		imx_uart_dma_exit(sport);
	}

	/* disable the transmitter before releasing IRQs */
	spin_lock_irqsave(&sport->port.lock, flags);
	temp = readl(sport->port.membase + UCR2);
	temp &= ~(UCR2_TXEN);
	writel(temp, sport->port.membase + UCR2);
	spin_unlock_irqrestore(&sport->port.lock, flags);

	if (USE_IRDA(sport)) {
		struct imxuart_platform_data *pdata;
		pdata = dev_get_platdata(sport->port.dev);
		if (pdata->irda_enable)
			pdata->irda_enable(0);
	}

	/*
	 * Stop our timer.
	 */
	del_timer_sync(&sport->timer);

	/*
	 * Free the interrupts
	 */
	if (sport->txirq > 0) {
		/* three-IRQ layout (i.MX1); the RTS IRQ is not used for IrDA */
		if (!USE_IRDA(sport))
			free_irq(sport->rtsirq, sport);
		free_irq(sport->txirq, sport);
		free_irq(sport->rxirq, sport);
	} else
		free_irq(sport->port.irq, sport);

	/*
	 * Disable all interrupts, port and break condition.
	 */

	spin_lock_irqsave(&sport->port.lock, flags);
	temp = readl(sport->port.membase + UCR1);
	temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN);
	if (USE_IRDA(sport))
		temp &= ~(UCR1_IREN);

	writel(temp, sport->port.membase + UCR1);
	spin_unlock_irqrestore(&sport->port.lock, flags);

	clk_disable_unprepare(sport->clk_per);
	clk_disable_unprepare(sport->clk_ipg);
}
1291
imx_flush_buffer(struct uart_port * port)1292 static void imx_flush_buffer(struct uart_port *port)
1293 {
1294 struct imx_port *sport = (struct imx_port *)port;
1295
1296 if (sport->dma_is_enabled) {
1297 sport->tx_bytes = 0;
1298 dmaengine_terminate_all(sport->dma_chan_tx);
1299 }
1300 }
1301
/*
 * Apply new termios settings: character size, stop bits, parity, flow
 * control and baud rate.  Drains the transmitter and briefly disables
 * RX/TX while reprogramming the baud-rate and mode registers; takes the
 * port lock itself.
 */
static void
imx_set_termios(struct uart_port *port, struct ktermios *termios,
		   struct ktermios *old)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long flags;
	unsigned int ucr2, old_ucr1, old_txrxen, baud, quot;
	unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
	unsigned int div, ufcr;
	unsigned long num, denom;
	uint64_t tdiv64;

	/*
	 * If we don't support modem control lines, don't allow
	 * these to be set.
	 *
	 * NOTE(review): this branch is dead code - the condition is
	 * literally "if (0)".  Left untouched; confirm intent upstream.
	 */
	if (0) {
		termios->c_cflag &= ~(HUPCL | CRTSCTS | CMSPAR);
		termios->c_cflag |= CLOCAL;
	}

	/*
	 * We only support CS7 and CS8.
	 * (First pass retries the previous size, second pass forces CS8,
	 * so this loop runs at most twice.)
	 */
	while ((termios->c_cflag & CSIZE) != CS7 &&
	       (termios->c_cflag & CSIZE) != CS8) {
		termios->c_cflag &= ~CSIZE;
		termios->c_cflag |= old_csize;
		old_csize = CS8;
	}

	if ((termios->c_cflag & CSIZE) == CS8)
		ucr2 = UCR2_WS | UCR2_SRST | UCR2_IRTS;
	else
		ucr2 = UCR2_SRST | UCR2_IRTS;

	if (termios->c_cflag & CRTSCTS) {
		if (sport->have_rtscts) {
			/* hardware flow control: receiver drives RTS */
			ucr2 &= ~UCR2_IRTS;
			ucr2 |= UCR2_CTSC;

			/* Can we enable the DMA support? */
			if (is_imx6q_uart(sport) && !uart_console(port)
				&& !sport->dma_is_inited)
				imx_uart_dma_init(sport);
		} else {
			/* no RTS/CTS wiring: silently drop the request */
			termios->c_cflag &= ~CRTSCTS;
		}
	}

	if (termios->c_cflag & CSTOPB)
		ucr2 |= UCR2_STPB;
	if (termios->c_cflag & PARENB) {
		ucr2 |= UCR2_PREN;
		if (termios->c_cflag & PARODD)
			ucr2 |= UCR2_PROE;
	}

	del_timer_sync(&sport->timer);

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
	quot = uart_get_divisor(port, baud);

	spin_lock_irqsave(&sport->port.lock, flags);

	sport->port.read_status_mask = 0;
	if (termios->c_iflag & INPCK)
		sport->port.read_status_mask |= (URXD_FRMERR | URXD_PRERR);
	if (termios->c_iflag & (BRKINT | PARMRK))
		sport->port.read_status_mask |= URXD_BRK;

	/*
	 * Characters to ignore
	 */
	sport->port.ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		sport->port.ignore_status_mask |= URXD_PRERR;
	if (termios->c_iflag & IGNBRK) {
		sport->port.ignore_status_mask |= URXD_BRK;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			sport->port.ignore_status_mask |= URXD_OVRRUN;
	}

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	/*
	 * disable interrupts and drain transmitter
	 */
	old_ucr1 = readl(sport->port.membase + UCR1);
	writel(old_ucr1 & ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN),
			sport->port.membase + UCR1);

	/* busy-wait (with the port lock held) until TX is complete */
	while (!(readl(sport->port.membase + USR2) & USR2_TXDC))
		barrier();

	/* then, disable everything */
	old_txrxen = readl(sport->port.membase + UCR2);
	writel(old_txrxen & ~(UCR2_TXEN | UCR2_RXEN),
			sport->port.membase + UCR2);
	/* keep only the previous enable bits for restoration below */
	old_txrxen &= (UCR2_TXEN | UCR2_RXEN);

	if (USE_IRDA(sport)) {
		/*
		 * use maximum available submodule frequency to
		 * avoid missing short pulses due to low sampling rate
		 */
		div = 1;
	} else {
		/* custom-baudrate handling */
		div = sport->port.uartclk / (baud * 16);
		if (baud == 38400 && quot != div)
			baud = sport->port.uartclk / (quot * 16);

		div = sport->port.uartclk / (baud * 16);
		if (div > 7)
			div = 7;
		if (!div)
			div = 1;
	}

	/* find UBIR/UBMR values that best approximate the requested rate */
	rational_best_approximation(16 * div * baud, sport->port.uartclk,
		1 << 16, 1 << 16, &num, &denom);

	/* report the rate actually achieved back to the tty layer */
	tdiv64 = sport->port.uartclk;
	tdiv64 *= num;
	do_div(tdiv64, denom * 16 * div);
	tty_termios_encode_baud_rate(termios,
				(speed_t)tdiv64, (speed_t)tdiv64);

	/* the hardware registers hold (value - 1) */
	num -= 1;
	denom -= 1;

	ufcr = readl(sport->port.membase + UFCR);
	ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div);
	if (sport->dte_mode)
		ufcr |= UFCR_DCEDTE;
	writel(ufcr, sport->port.membase + UFCR);

	writel(num, sport->port.membase + UBIR);
	writel(denom, sport->port.membase + UBMR);

	if (!is_imx1_uart(sport))
		writel(sport->port.uartclk / div / 1000,
			sport->port.membase + IMX21_ONEMS);

	/* restore the interrupt enables saved before the drain */
	writel(old_ucr1, sport->port.membase + UCR1);

	/* set the parity, stop bits and data size */
	writel(ucr2 | old_txrxen, sport->port.membase + UCR2);

	if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
		imx_enable_ms(&sport->port);

	if (sport->dma_is_inited && !sport->dma_is_enabled)
		imx_enable_dma(sport);
	spin_unlock_irqrestore(&sport->port.lock, flags);
}
1469
imx_type(struct uart_port * port)1470 static const char *imx_type(struct uart_port *port)
1471 {
1472 struct imx_port *sport = (struct imx_port *)port;
1473
1474 return sport->port.type == PORT_IMX ? "IMX" : NULL;
1475 }
1476
1477 /*
1478 * Configure/autoconfigure the port.
1479 */
imx_config_port(struct uart_port * port,int flags)1480 static void imx_config_port(struct uart_port *port, int flags)
1481 {
1482 struct imx_port *sport = (struct imx_port *)port;
1483
1484 if (flags & UART_CONFIG_TYPE)
1485 sport->port.type = PORT_IMX;
1486 }
1487
1488 /*
1489 * Verify the new serial_struct (for TIOCSSERIAL).
1490 * The only change we allow are to the flags and type, and
1491 * even then only between PORT_IMX and PORT_UNKNOWN
1492 */
1493 static int
imx_verify_port(struct uart_port * port,struct serial_struct * ser)1494 imx_verify_port(struct uart_port *port, struct serial_struct *ser)
1495 {
1496 struct imx_port *sport = (struct imx_port *)port;
1497 int ret = 0;
1498
1499 if (ser->type != PORT_UNKNOWN && ser->type != PORT_IMX)
1500 ret = -EINVAL;
1501 if (sport->port.irq != ser->irq)
1502 ret = -EINVAL;
1503 if (ser->io_type != UPIO_MEM)
1504 ret = -EINVAL;
1505 if (sport->port.uartclk / 16 != ser->baud_base)
1506 ret = -EINVAL;
1507 if (sport->port.mapbase != (unsigned long)ser->iomem_base)
1508 ret = -EINVAL;
1509 if (sport->port.iobase != ser->port)
1510 ret = -EINVAL;
1511 if (ser->hub6 != 0)
1512 ret = -EINVAL;
1513 return ret;
1514 }
1515
1516 #if defined(CONFIG_CONSOLE_POLL)
imx_poll_get_char(struct uart_port * port)1517 static int imx_poll_get_char(struct uart_port *port)
1518 {
1519 if (!(readl(port->membase + USR2) & USR2_RDR))
1520 return NO_POLL_CHAR;
1521
1522 return readl(port->membase + URXD0) & URXD_RX_DATA;
1523 }
1524
/*
 * Polled (e.g. kgdb) TX of a single character: mask interrupt sources,
 * synchronously push the byte, wait for it to leave the shifter, then
 * restore the saved control registers.
 */
static void imx_poll_put_char(struct uart_port *port, unsigned char c)
{
	struct imx_port_ucrs old_ucr;
	unsigned int status;

	/* save control registers */
	imx_port_ucrs_save(port, &old_ucr);

	/* disable interrupts */
	writel(UCR1_UARTEN, port->membase + UCR1);
	writel(old_ucr.ucr2 & ~(UCR2_ATEN | UCR2_RTSEN | UCR2_ESCI),
			port->membase + UCR2);
	writel(old_ucr.ucr3 & ~(UCR3_DCD | UCR3_RI | UCR3_DTREN),
			port->membase + UCR3);

	/* drain: busy-wait for room in the TX FIFO */
	do {
		status = readl(port->membase + USR1);
	} while (~status & USR1_TRDY);

	/* write */
	writel(c, port->membase + URTX0);

	/* flush: busy-wait until transmission is complete */
	do {
		status = readl(port->membase + USR2);
	} while (~status & USR2_TXDC);

	/* restore control registers */
	imx_port_ucrs_restore(port, &old_ucr);
}
1556 #endif
1557
/* uart_ops vtable: the callbacks the serial core invokes on this driver. */
static struct uart_ops imx_pops = {
	.tx_empty	= imx_tx_empty,
	.set_mctrl	= imx_set_mctrl,
	.get_mctrl	= imx_get_mctrl,
	.stop_tx	= imx_stop_tx,
	.start_tx	= imx_start_tx,
	.stop_rx	= imx_stop_rx,
	.enable_ms	= imx_enable_ms,
	.break_ctl	= imx_break_ctl,
	.startup	= imx_startup,
	.shutdown	= imx_shutdown,
	.flush_buffer	= imx_flush_buffer,
	.set_termios	= imx_set_termios,
	.type		= imx_type,
	.config_port	= imx_config_port,
	.verify_port	= imx_verify_port,
#if defined(CONFIG_CONSOLE_POLL)
	.poll_get_char  = imx_poll_get_char,
	.poll_put_char  = imx_poll_put_char,
#endif
};

/* Registered ports, indexed by sport->port.line (assigned in probe). */
static struct imx_port *imx_ports[UART_NR];
1581
1582 #ifdef CONFIG_SERIAL_IMX_CONSOLE
imx_console_putchar(struct uart_port * port,int ch)1583 static void imx_console_putchar(struct uart_port *port, int ch)
1584 {
1585 struct imx_port *sport = (struct imx_port *)port;
1586
1587 while (readl(sport->port.membase + uts_reg(sport)) & UTS_TXFULL)
1588 barrier();
1589
1590 writel(ch, sport->port.membase + URTX0);
1591 }
1592
1593 /*
1594 * Interrupts are disabled on entering
1595 */
/*
 * Emit a kernel console message.  Interrupts are disabled on entry.
 * Saves UCR1-3, forces the UART/transmitter on, writes the string,
 * busy-waits for it to drain and restores the saved register state.
 */
static void
imx_console_write(struct console *co, const char *s, unsigned int count)
{
	struct imx_port *sport = imx_ports[co->index];
	struct imx_port_ucrs old_ucr;
	unsigned int ucr1;
	unsigned long flags = 0;
	int locked = 1;
	int retval;

	/* clocks were prepared in imx_console_setup(); only enable here */
	retval = clk_enable(sport->clk_per);
	if (retval)
		return;
	retval = clk_enable(sport->clk_ipg);
	if (retval) {
		clk_disable(sport->clk_per);
		return;
	}

	/* avoid self-deadlock when called from sysrq or an oops handler */
	if (sport->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock_irqsave(&sport->port.lock, flags);
	else
		spin_lock_irqsave(&sport->port.lock, flags);

	/*
	 * First, save UCR1/2/3 and then disable interrupts
	 */
	imx_port_ucrs_save(&sport->port, &old_ucr);
	ucr1 = old_ucr.ucr1;

	/* i.MX1 additionally needs the UART clock gate bit set */
	if (is_imx1_uart(sport))
		ucr1 |= IMX1_UCR1_UARTCLKEN;
	ucr1 |= UCR1_UARTEN;
	ucr1 &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN);

	writel(ucr1, sport->port.membase + UCR1);

	writel(old_ucr.ucr2 | UCR2_TXEN, sport->port.membase + UCR2);

	uart_console_write(&sport->port, s, count, imx_console_putchar);

	/*
	 * Finally, wait for transmitter to become empty
	 * and restore UCR1/2/3
	 */
	while (!(readl(sport->port.membase + USR2) & USR2_TXDC));

	imx_port_ucrs_restore(&sport->port, &old_ucr);

	if (locked)
		spin_unlock_irqrestore(&sport->port.lock, flags);

	clk_disable(sport->clk_ipg);
	clk_disable(sport->clk_per);
}
1653
1654 /*
1655 * If the port was already initialised (eg, by a boot loader),
1656 * try to determine the current setup.
1657 */
/*
 * Recover baud rate, parity and word size from a port that was already
 * enabled (e.g. by the boot loader), by decoding UCR2, UBIR/UBMR and the
 * UFCR reference divider.  Output parameters are only written when the
 * UART is found enabled.
 */
static void __init
imx_console_get_options(struct imx_port *sport, int *baud,
			   int *parity, int *bits)
{

	if (readl(sport->port.membase + UCR1) & UCR1_UARTEN) {
		/* ok, the port was enabled */
		unsigned int ucr2, ubir, ubmr, uartclk;
		unsigned int baud_raw;
		unsigned int ucfr_rfdiv;

		ucr2 = readl(sport->port.membase + UCR2);

		*parity = 'n';
		if (ucr2 & UCR2_PREN) {
			if (ucr2 & UCR2_PROE)
				*parity = 'o';
			else
				*parity = 'e';
		}

		/* WS set means 8-bit characters */
		if (ucr2 & UCR2_WS)
			*bits = 8;
		else
			*bits = 7;

		ubir = readl(sport->port.membase + UBIR) & 0xffff;
		ubmr = readl(sport->port.membase + UBMR) & 0xffff;

		/* decode the reference-clock divider field of UFCR */
		ucfr_rfdiv = (readl(sport->port.membase + UFCR) & UFCR_RFDIV) >> 7;
		if (ucfr_rfdiv == 6)
			ucfr_rfdiv = 7;
		else
			ucfr_rfdiv = 6 - ucfr_rfdiv;

		uartclk = clk_get_rate(sport->clk_per);
		uartclk /= ucfr_rfdiv;

		{	/*
			 * The next code provides exact computation of
			 *   baud_raw = round(((uartclk/16) * (ubir + 1)) / (ubmr + 1))
			 * without need of float support or long long division,
			 * which would be required to prevent 32bit arithmetic overflow
			 */
			unsigned int mul = ubir + 1;
			unsigned int div = 16 * (ubmr + 1);
			unsigned int rem = uartclk % div;

			baud_raw = (uartclk / div) * mul;
			baud_raw += (rem * mul + div / 2) / div;
			/* snap to the nearest multiple of 100 baud */
			*baud = (baud_raw + 50) / 100 * 100;
		}

		if (*baud != baud_raw)
			pr_info("Console IMX rounded baud rate from %d to %d\n",
				baud_raw, *baud);
	}
}
1716
/*
 * Console setup: select the port, parse "console=" options (or probe the
 * state left by the boot loader) and program the port via the serial core.
 *
 * Clock handling: only the ipg clock is needed to access registers here.
 * On success, clk_ipg stays prepared (disabled) and clk_per is prepared,
 * so that imx_console_write() can use plain clk_enable()/clk_disable().
 */
static int __init
imx_console_setup(struct console *co, char *options)
{
	struct imx_port *sport;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int retval;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index == -1 || co->index >= ARRAY_SIZE(imx_ports))
		co->index = 0;
	sport = imx_ports[co->index];
	if (sport == NULL)
		return -ENODEV;

	/* For setting the registers, we only need to enable the ipg clock. */
	retval = clk_prepare_enable(sport->clk_ipg);
	if (retval)
		goto error_console;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	else
		imx_console_get_options(sport, &baud, &parity, &bits);

	imx_setup_ufcr(sport, 0);

	retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);

	/* registers are programmed; drop the enable but keep ipg prepared */
	clk_disable(sport->clk_ipg);
	if (retval) {
		clk_unprepare(sport->clk_ipg);
		goto error_console;
	}

	/* prepare clk_per for later console writes */
	retval = clk_prepare(sport->clk_per);
	if (retval)
		clk_disable_unprepare(sport->clk_ipg);

	error_console:
	return retval;
}
1765
/* Forward declaration: imx_console.data refers to it before its definition. */
static struct uart_driver imx_reg;
static struct console imx_console = {
	.name		= DEV_NAME,
	.write		= imx_console_write,
	.device		= uart_console_device,
	.setup		= imx_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,	/* -1: resolved from the kernel command line */
	.data		= &imx_reg,
};

#define IMX_CONSOLE	&imx_console
#else
#define IMX_CONSOLE	NULL
#endif

static struct uart_driver imx_reg = {
	.owner          = THIS_MODULE,
	.driver_name    = DRIVER_NAME,
	.dev_name       = DEV_NAME,
	.major          = SERIAL_IMX_MAJOR,
	.minor          = MINOR_START,
	.nr             = ARRAY_SIZE(imx_ports),
	.cons           = IMX_CONSOLE,
};
1791
/* Legacy platform suspend hook: arm the async WAKE interrupt so the UART
 * can wake the system, then let the serial core suspend the port. */
static int serial_imx_suspend(struct platform_device *dev, pm_message_t state)
{
	struct imx_port *sport = platform_get_drvdata(dev);
	unsigned int ucr3;

	/* enable wakeup from i.MX UART */
	ucr3 = readl(sport->port.membase + UCR3);
	writel(ucr3 | UCR3_AWAKEN, sport->port.membase + UCR3);

	uart_suspend_port(&imx_reg, &sport->port);

	return 0;
}
1806
serial_imx_resume(struct platform_device * dev)1807 static int serial_imx_resume(struct platform_device *dev)
1808 {
1809 struct imx_port *sport = platform_get_drvdata(dev);
1810 unsigned int val;
1811
1812 /* disable wakeup from i.MX UART */
1813 val = readl(sport->port.membase + UCR3);
1814 val &= ~UCR3_AWAKEN;
1815 writel(val, sport->port.membase + UCR3);
1816
1817 uart_resume_port(&imx_reg, &sport->port);
1818
1819 return 0;
1820 }
1821
1822 #ifdef CONFIG_OF
/*
 * This function returns 1 if pdev is not a device instantiated from the
 * device tree, 0 if all information was successfully obtained from the
 * device tree, or a negative errno on failure.
 */
static int serial_imx_probe_dt(struct imx_port *sport,
		struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
			of_match_device(imx_uart_dt_ids, &pdev->dev);
	int ret;

	if (!np)
		/* no device tree device */
		return 1;

	/* the "serial<n>" alias determines the port number */
	ret = of_alias_get_id(np, "serial");
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
		return ret;
	}
	sport->port.line = ret;

	if (of_get_property(np, "fsl,uart-has-rtscts", NULL))
		sport->have_rtscts = 1;

	if (of_get_property(np, "fsl,irda-mode", NULL))
		sport->use_irda = 1;

	if (of_get_property(np, "fsl,dte-mode", NULL))
		sport->dte_mode = 1;

	/* NOTE(review): of_id is dereferenced without a NULL check; this
	 * relies on the driver-core match having succeeded — confirm. */
	sport->devdata = of_id->data;

	return 0;
}
1859 #else
/* Non-OF build: report "not a DT device" so platform data is used instead. */
static inline int serial_imx_probe_dt(struct imx_port *sport,
		struct platform_device *pdev)
{
	return 1;
}
1865 #endif
1866
serial_imx_probe_pdata(struct imx_port * sport,struct platform_device * pdev)1867 static void serial_imx_probe_pdata(struct imx_port *sport,
1868 struct platform_device *pdev)
1869 {
1870 struct imxuart_platform_data *pdata = dev_get_platdata(&pdev->dev);
1871
1872 sport->port.line = pdev->id;
1873 sport->devdata = (struct imx_uart_data *) pdev->id_entry->driver_data;
1874
1875 if (!pdata)
1876 return;
1877
1878 if (pdata->flags & IMXUART_HAVE_RTSCTS)
1879 sport->have_rtscts = 1;
1880
1881 if (pdata->flags & IMXUART_IRDA)
1882 sport->use_irda = 1;
1883 }
1884
serial_imx_probe(struct platform_device * pdev)1885 static int serial_imx_probe(struct platform_device *pdev)
1886 {
1887 struct imx_port *sport;
1888 void __iomem *base;
1889 int ret = 0;
1890 struct resource *res;
1891
1892 sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
1893 if (!sport)
1894 return -ENOMEM;
1895
1896 ret = serial_imx_probe_dt(sport, pdev);
1897 if (ret > 0)
1898 serial_imx_probe_pdata(sport, pdev);
1899 else if (ret < 0)
1900 return ret;
1901
1902 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1903 base = devm_ioremap_resource(&pdev->dev, res);
1904 if (IS_ERR(base))
1905 return PTR_ERR(base);
1906
1907 sport->port.dev = &pdev->dev;
1908 sport->port.mapbase = res->start;
1909 sport->port.membase = base;
1910 sport->port.type = PORT_IMX,
1911 sport->port.iotype = UPIO_MEM;
1912 sport->port.irq = platform_get_irq(pdev, 0);
1913 sport->rxirq = platform_get_irq(pdev, 0);
1914 sport->txirq = platform_get_irq(pdev, 1);
1915 sport->rtsirq = platform_get_irq(pdev, 2);
1916 sport->port.fifosize = 32;
1917 sport->port.ops = &imx_pops;
1918 sport->port.flags = UPF_BOOT_AUTOCONF;
1919 init_timer(&sport->timer);
1920 sport->timer.function = imx_timeout;
1921 sport->timer.data = (unsigned long)sport;
1922
1923 sport->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1924 if (IS_ERR(sport->clk_ipg)) {
1925 ret = PTR_ERR(sport->clk_ipg);
1926 dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret);
1927 return ret;
1928 }
1929
1930 sport->clk_per = devm_clk_get(&pdev->dev, "per");
1931 if (IS_ERR(sport->clk_per)) {
1932 ret = PTR_ERR(sport->clk_per);
1933 dev_err(&pdev->dev, "failed to get per clk: %d\n", ret);
1934 return ret;
1935 }
1936
1937 sport->port.uartclk = clk_get_rate(sport->clk_per);
1938
1939 imx_ports[sport->port.line] = sport;
1940
1941 platform_set_drvdata(pdev, sport);
1942
1943 return uart_add_one_port(&imx_reg, &sport->port);
1944 }
1945
serial_imx_remove(struct platform_device * pdev)1946 static int serial_imx_remove(struct platform_device *pdev)
1947 {
1948 struct imx_port *sport = platform_get_drvdata(pdev);
1949
1950 return uart_remove_one_port(&imx_reg, &sport->port);
1951 }
1952
/* Platform driver glue.  Note: uses the legacy suspend/resume callbacks
 * rather than dev_pm_ops. */
static struct platform_driver serial_imx_driver = {
	.probe		= serial_imx_probe,
	.remove		= serial_imx_remove,

	.suspend	= serial_imx_suspend,
	.resume		= serial_imx_resume,
	.id_table	= imx_uart_devtype,
	.driver		= {
		.name	= "imx-uart",
		.owner	= THIS_MODULE,
		.of_match_table = imx_uart_dt_ids,
	},
};
1966
imx_serial_init(void)1967 static int __init imx_serial_init(void)
1968 {
1969 int ret;
1970
1971 pr_info("Serial: IMX driver\n");
1972
1973 ret = uart_register_driver(&imx_reg);
1974 if (ret)
1975 return ret;
1976
1977 ret = platform_driver_register(&serial_imx_driver);
1978 if (ret != 0)
1979 uart_unregister_driver(&imx_reg);
1980
1981 return ret;
1982 }
1983
imx_serial_exit(void)1984 static void __exit imx_serial_exit(void)
1985 {
1986 platform_driver_unregister(&serial_imx_driver);
1987 uart_unregister_driver(&imx_reg);
1988 }
1989
/* Module entry/exit points and metadata. */
module_init(imx_serial_init);
module_exit(imx_serial_exit);

MODULE_AUTHOR("Sascha Hauer");
MODULE_DESCRIPTION("IMX generic serial port driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx-uart");
1997