1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) Maxime Coquelin 2015
4 * Copyright (C) STMicroelectronics SA 2017
5 * Authors: Maxime Coquelin <mcoquelin.stm32@gmail.com>
6 * Gerald Baeza <gerald.baeza@foss.st.com>
7 * Erwan Le Ray <erwan.leray@foss.st.com>
8 *
9 * Inspired by st-asc.c from STMicroelectronics (c)
10 */
11
12 #include <linux/clk.h>
13 #include <linux/console.h>
14 #include <linux/delay.h>
15 #include <linux/dma-direction.h>
16 #include <linux/dmaengine.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/io.h>
19 #include <linux/iopoll.h>
20 #include <linux/irq.h>
21 #include <linux/module.h>
22 #include <linux/of.h>
23 #include <linux/of_platform.h>
24 #include <linux/pinctrl/consumer.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/serial_core.h>
29 #include <linux/serial.h>
30 #include <linux/spinlock.h>
31 #include <linux/sysrq.h>
32 #include <linux/tty_flip.h>
33 #include <linux/tty.h>
34
35 #include "serial_mctrl_gpio.h"
36 #include "stm32-usart.h"
37
38
39 /* Register offsets */
40 static struct stm32_usart_info __maybe_unused stm32f4_info = {
41 .ofs = {
42 .isr = 0x00,
43 .rdr = 0x04,
44 .tdr = 0x04,
45 .brr = 0x08,
46 .cr1 = 0x0c,
47 .cr2 = 0x10,
48 .cr3 = 0x14,
49 .gtpr = 0x18,
50 .rtor = UNDEF_REG,
51 .rqr = UNDEF_REG,
52 .icr = UNDEF_REG,
53 },
54 .cfg = {
55 .uart_enable_bit = 13,
56 .has_7bits_data = false,
57 .fifosize = 1,
58 }
59 };
60
61 static struct stm32_usart_info __maybe_unused stm32f7_info = {
62 .ofs = {
63 .cr1 = 0x00,
64 .cr2 = 0x04,
65 .cr3 = 0x08,
66 .brr = 0x0c,
67 .gtpr = 0x10,
68 .rtor = 0x14,
69 .rqr = 0x18,
70 .isr = 0x1c,
71 .icr = 0x20,
72 .rdr = 0x24,
73 .tdr = 0x28,
74 },
75 .cfg = {
76 .uart_enable_bit = 0,
77 .has_7bits_data = true,
78 .has_swap = true,
79 .fifosize = 1,
80 }
81 };
82
83 static struct stm32_usart_info __maybe_unused stm32h7_info = {
84 .ofs = {
85 .cr1 = 0x00,
86 .cr2 = 0x04,
87 .cr3 = 0x08,
88 .brr = 0x0c,
89 .gtpr = 0x10,
90 .rtor = 0x14,
91 .rqr = 0x18,
92 .isr = 0x1c,
93 .icr = 0x20,
94 .rdr = 0x24,
95 .tdr = 0x28,
96 },
97 .cfg = {
98 .uart_enable_bit = 0,
99 .has_7bits_data = true,
100 .has_swap = true,
101 .has_wakeup = true,
102 .has_fifo = true,
103 .fifosize = 16,
104 }
105 };
106
107 static void stm32_usart_stop_tx(struct uart_port *port);
108 static void stm32_usart_transmit_chars(struct uart_port *port);
109 static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch);
110
111 static inline struct stm32_port *to_stm32_port(struct uart_port *port)
112 {
113 return container_of(port, struct stm32_port, port);
114 }
115
116 static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits)
117 {
118 u32 val;
119
120 val = readl_relaxed(port->membase + reg);
121 val |= bits;
122 writel_relaxed(val, port->membase + reg);
123 }
124
125 static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits)
126 {
127 u32 val;
128
129 val = readl_relaxed(port->membase + reg);
130 val &= ~bits;
131 writel_relaxed(val, port->membase + reg);
132 }
133
134 static unsigned int stm32_usart_tx_empty(struct uart_port *port)
135 {
136 struct stm32_port *stm32_port = to_stm32_port(port);
137 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
138
139 if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
140 return TIOCSER_TEMT;
141
142 return 0;
143 }
144
145 static void stm32_usart_rs485_rts_enable(struct uart_port *port)
146 {
147 struct stm32_port *stm32_port = to_stm32_port(port);
148 struct serial_rs485 *rs485conf = &port->rs485;
149
150 if (stm32_port->hw_flow_control ||
151 !(rs485conf->flags & SER_RS485_ENABLED))
152 return;
153
154 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
155 mctrl_gpio_set(stm32_port->gpios,
156 stm32_port->port.mctrl | TIOCM_RTS);
157 } else {
158 mctrl_gpio_set(stm32_port->gpios,
159 stm32_port->port.mctrl & ~TIOCM_RTS);
160 }
161 }
162
163 static void stm32_usart_rs485_rts_disable(struct uart_port *port)
164 {
165 struct stm32_port *stm32_port = to_stm32_port(port);
166 struct serial_rs485 *rs485conf = &port->rs485;
167
168 if (stm32_port->hw_flow_control ||
169 !(rs485conf->flags & SER_RS485_ENABLED))
170 return;
171
172 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
173 mctrl_gpio_set(stm32_port->gpios,
174 stm32_port->port.mctrl & ~TIOCM_RTS);
175 } else {
176 mctrl_gpio_set(stm32_port->gpios,
177 stm32_port->port.mctrl | TIOCM_RTS);
178 }
179 }
180
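/*
 * Illustrative note (not from the original source): the RS485 turnaround
 * delays passed in below (delay_rts_before_send / delay_rts_after_send, in
 * milliseconds) are converted to DEAT/DEDT sample-time units as
 * delay_ms * baud * oversampling / 1000. For a hypothetical 1 ms delay at
 * 115200 baud with 16x oversampling that is about 1843 sample times, well
 * above rs485_deat_dedt_max, so the value gets clamped to the field maximum.
 */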
181 static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
182 u32 delay_DDE, u32 baud)
183 {
184 u32 rs485_deat_dedt;
185 u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT);
186 bool over8;
187
188 *cr3 |= USART_CR3_DEM;
189 over8 = *cr1 & USART_CR1_OVER8;
190
191 *cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
192
193 if (over8)
194 rs485_deat_dedt = delay_ADE * baud * 8;
195 else
196 rs485_deat_dedt = delay_ADE * baud * 16;
197
198 rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
199 rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
200 rs485_deat_dedt_max : rs485_deat_dedt;
201 rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEAT_SHIFT) &
202 USART_CR1_DEAT_MASK;
203 *cr1 |= rs485_deat_dedt;
204
205 if (over8)
206 rs485_deat_dedt = delay_DDE * baud * 8;
207 else
208 rs485_deat_dedt = delay_DDE * baud * 16;
209
210 rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
211 rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
212 rs485_deat_dedt_max : rs485_deat_dedt;
213 rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEDT_SHIFT) &
214 USART_CR1_DEDT_MASK;
215 *cr1 |= rs485_deat_dedt;
216 }
217
218 static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *termios,
219 struct serial_rs485 *rs485conf)
220 {
221 struct stm32_port *stm32_port = to_stm32_port(port);
222 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
223 const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
224 u32 usartdiv, baud, cr1, cr3;
225 bool over8;
226
227 stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
228
229 if (rs485conf->flags & SER_RS485_ENABLED) {
230 cr1 = readl_relaxed(port->membase + ofs->cr1);
231 cr3 = readl_relaxed(port->membase + ofs->cr3);
232 usartdiv = readl_relaxed(port->membase + ofs->brr);
233 usartdiv = usartdiv & GENMASK(15, 0);
234 over8 = cr1 & USART_CR1_OVER8;
235
236 if (over8)
237 usartdiv = usartdiv | (usartdiv & GENMASK(4, 0))
238 << USART_BRR_04_R_SHIFT;
239
240 baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv);
241 stm32_usart_config_reg_rs485(&cr1, &cr3,
242 rs485conf->delay_rts_before_send,
243 rs485conf->delay_rts_after_send,
244 baud);
245
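/* DEP selects an active-low DE signal; RTS_ON_SEND wants DE active high, so clear it */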
246 if (rs485conf->flags & SER_RS485_RTS_ON_SEND)
247 cr3 &= ~USART_CR3_DEP;
248 else
249 cr3 |= USART_CR3_DEP;
250
251 writel_relaxed(cr3, port->membase + ofs->cr3);
252 writel_relaxed(cr1, port->membase + ofs->cr1);
253
254 if (!port->rs485_rx_during_tx_gpio)
255 rs485conf->flags |= SER_RS485_RX_DURING_TX;
256
257 } else {
258 stm32_usart_clr_bits(port, ofs->cr3,
259 USART_CR3_DEM | USART_CR3_DEP);
260 stm32_usart_clr_bits(port, ofs->cr1,
261 USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
262 }
263
264 stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
265
266 /* Adjust RTS polarity in case it's driven in software */
267 if (stm32_usart_tx_empty(port))
268 stm32_usart_rs485_rts_disable(port);
269 else
270 stm32_usart_rs485_rts_enable(port);
271
272 return 0;
273 }
274
275 static int stm32_usart_init_rs485(struct uart_port *port,
276 struct platform_device *pdev)
277 {
278 struct serial_rs485 *rs485conf = &port->rs485;
279
280 rs485conf->flags = 0;
281 rs485conf->delay_rts_before_send = 0;
282 rs485conf->delay_rts_after_send = 0;
283
284 if (!pdev->dev.of_node)
285 return -ENODEV;
286
287 return uart_get_rs485_mode(port);
288 }
289
290 static bool stm32_usart_rx_dma_started(struct stm32_port *stm32_port)
291 {
292 return stm32_port->rx_ch ? stm32_port->rx_dma_busy : false;
293 }
294
295 static void stm32_usart_rx_dma_terminate(struct stm32_port *stm32_port)
296 {
297 dmaengine_terminate_async(stm32_port->rx_ch);
298 stm32_port->rx_dma_busy = false;
299 }
300
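/*
 * Shared pause/resume helper for the RX and TX DMA paths: callers pass in
 * dmaengine_pause() or dmaengine_resume() together with the matching
 * xx_dma_started() / xx_dma_terminate() helpers, so a failing dmaengine call
 * always tears the channel down through the right terminate routine.
 */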
301 static int stm32_usart_dma_pause_resume(struct stm32_port *stm32_port,
302 struct dma_chan *chan,
303 enum dma_status expected_status,
304 int dmaengine_pause_or_resume(struct dma_chan *),
305 bool stm32_usart_xx_dma_started(struct stm32_port *),
306 void stm32_usart_xx_dma_terminate(struct stm32_port *))
307 {
308 struct uart_port *port = &stm32_port->port;
309 enum dma_status dma_status;
310 int ret;
311
312 if (!stm32_usart_xx_dma_started(stm32_port))
313 return -EPERM;
314
315 dma_status = dmaengine_tx_status(chan, chan->cookie, NULL);
316 if (dma_status != expected_status)
317 return -EAGAIN;
318
319 ret = dmaengine_pause_or_resume(chan);
320 if (ret) {
321 dev_err(port->dev, "DMA failed with error code: %d\n", ret);
322 stm32_usart_xx_dma_terminate(stm32_port);
323 }
324 return ret;
325 }
326
327 static int stm32_usart_rx_dma_pause(struct stm32_port *stm32_port)
328 {
329 return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch,
330 DMA_IN_PROGRESS, dmaengine_pause,
331 stm32_usart_rx_dma_started,
332 stm32_usart_rx_dma_terminate);
333 }
334
335 static int stm32_usart_rx_dma_resume(struct stm32_port *stm32_port)
336 {
337 return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch,
338 DMA_PAUSED, dmaengine_resume,
339 stm32_usart_rx_dma_started,
340 stm32_usart_rx_dma_terminate);
341 }
342
343 /* Return true when data is pending (in pio mode), and false when no data is pending. */
344 static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr)
345 {
346 struct stm32_port *stm32_port = to_stm32_port(port);
347 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
348
349 *sr = readl_relaxed(port->membase + ofs->isr);
350 /* Get pending characters in RDR or FIFO */
351 if (*sr & USART_SR_RXNE) {
352 /* Get all pending characters from the RDR or the FIFO when using interrupts */
353 if (!stm32_usart_rx_dma_started(stm32_port))
354 return true;
355
356 /* Handle only RX data errors when using DMA */
357 if (*sr & USART_SR_ERR_MASK)
358 return true;
359 }
360
361 return false;
362 }
363
364 static u8 stm32_usart_get_char_pio(struct uart_port *port)
365 {
366 struct stm32_port *stm32_port = to_stm32_port(port);
367 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
368 unsigned long c;
369
370 c = readl_relaxed(port->membase + ofs->rdr);
371 /* Apply RDR data mask */
372 c &= stm32_port->rdr_mask;
373
374 return c;
375 }
376
377 static unsigned int stm32_usart_receive_chars_pio(struct uart_port *port)
378 {
379 struct stm32_port *stm32_port = to_stm32_port(port);
380 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
381 unsigned int size = 0;
382 u32 sr;
383 u8 c, flag;
384
385 while (stm32_usart_pending_rx_pio(port, &sr)) {
386 sr |= USART_SR_DUMMY_RX;
387 flag = TTY_NORMAL;
388
389 /*
390 * Status bits have to be cleared before reading the RDR:
391 * In FIFO mode, reading the RDR will pop the next data
392 * (if any) along with its status bits into the SR.
393 * Not doing so leads to misalignment between RDR and SR,
394 * and clears the status bits of the next RX data.
395 *
396 * Clear errors flags for stm32f7 and stm32h7 compatible
397 * devices. On stm32f4 compatible devices, the error bit is
398 * cleared by the sequence [read SR - read DR].
399 */
400 if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
401 writel_relaxed(sr & USART_SR_ERR_MASK,
402 port->membase + ofs->icr);
403
404 c = stm32_usart_get_char_pio(port);
405 port->icount.rx++;
406 size++;
407 if (sr & USART_SR_ERR_MASK) {
408 if (sr & USART_SR_ORE) {
409 port->icount.overrun++;
410 } else if (sr & USART_SR_PE) {
411 port->icount.parity++;
412 } else if (sr & USART_SR_FE) {
413 /* Break detection if character is null */
414 if (!c) {
415 port->icount.brk++;
416 if (uart_handle_break(port))
417 continue;
418 } else {
419 port->icount.frame++;
420 }
421 }
422
423 sr &= port->read_status_mask;
424
425 if (sr & USART_SR_PE) {
426 flag = TTY_PARITY;
427 } else if (sr & USART_SR_FE) {
428 if (!c)
429 flag = TTY_BREAK;
430 else
431 flag = TTY_FRAME;
432 }
433 }
434
435 if (uart_prepare_sysrq_char(port, c))
436 continue;
437 uart_insert_char(port, sr, USART_SR_ORE, c, flag);
438 }
439
440 return size;
441 }
442
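/*
 * Bookkeeping note for the cyclic RX DMA buffer: last_res is the number of
 * bytes left before the buffer wraps, so RX_BUF_L - last_res is the offset
 * of the first byte not yet pushed to the tty layer. It is decremented by
 * the amount copied and reset to RX_BUF_L when the end of the buffer is
 * reached.
 */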
443 static void stm32_usart_push_buffer_dma(struct uart_port *port, unsigned int dma_size)
444 {
445 struct stm32_port *stm32_port = to_stm32_port(port);
446 struct tty_port *ttyport = &stm32_port->port.state->port;
447 unsigned char *dma_start;
448 int dma_count, i;
449
450 dma_start = stm32_port->rx_buf + (RX_BUF_L - stm32_port->last_res);
451
452 /*
453 * Apply rdr_mask on the buffer in order to mask the parity bit.
454 * This loop is useless in cs8 mode because DMA copies only
455 * 8 bits and already ignores the parity bit.
456 */
457 if (!(stm32_port->rdr_mask == (BIT(8) - 1)))
458 for (i = 0; i < dma_size; i++)
459 *(dma_start + i) &= stm32_port->rdr_mask;
460
461 dma_count = tty_insert_flip_string(ttyport, dma_start, dma_size);
462 port->icount.rx += dma_count;
463 if (dma_count != dma_size)
464 port->icount.buf_overrun++;
465 stm32_port->last_res -= dma_count;
466 if (stm32_port->last_res == 0)
467 stm32_port->last_res = RX_BUF_L;
468 }
469
470 static unsigned int stm32_usart_receive_chars_dma(struct uart_port *port)
471 {
472 struct stm32_port *stm32_port = to_stm32_port(port);
473 unsigned int dma_size, size = 0;
474
475 /* DMA buffer is configured in cyclic mode and handles the rollback of the buffer. */
476 if (stm32_port->rx_dma_state.residue > stm32_port->last_res) {
477 /* Conditional first part: from last_res to end of DMA buffer */
478 dma_size = stm32_port->last_res;
479 stm32_usart_push_buffer_dma(port, dma_size);
480 size = dma_size;
481 }
482
483 dma_size = stm32_port->last_res - stm32_port->rx_dma_state.residue;
484 stm32_usart_push_buffer_dma(port, dma_size);
485 size += dma_size;
486
487 return size;
488 }
489
490 static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force_dma_flush)
491 {
492 struct stm32_port *stm32_port = to_stm32_port(port);
493 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
494 enum dma_status rx_dma_status;
495 u32 sr;
496 unsigned int size = 0;
497
498 if (stm32_usart_rx_dma_started(stm32_port) || force_dma_flush) {
499 rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
500 stm32_port->rx_ch->cookie,
501 &stm32_port->rx_dma_state);
502 if (rx_dma_status == DMA_IN_PROGRESS ||
503 rx_dma_status == DMA_PAUSED) {
504 /* Empty DMA buffer */
505 size = stm32_usart_receive_chars_dma(port);
506 sr = readl_relaxed(port->membase + ofs->isr);
507 if (sr & USART_SR_ERR_MASK) {
508 /* Disable DMA request line */
509 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
510
511 /* Switch to PIO mode to handle the errors */
512 size += stm32_usart_receive_chars_pio(port);
513
514 /* Switch back to DMA mode */
515 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);
516 }
517 } else {
518 /* Disable RX DMA */
519 stm32_usart_rx_dma_terminate(stm32_port);
520 /* Fall back to interrupt mode */
521 dev_dbg(port->dev, "DMA error, fallback to irq mode\n");
522 size = stm32_usart_receive_chars_pio(port);
523 }
524 } else {
525 size = stm32_usart_receive_chars_pio(port);
526 }
527
528 return size;
529 }
530
531 static void stm32_usart_rx_dma_complete(void *arg)
532 {
533 struct uart_port *port = arg;
534 struct tty_port *tport = &port->state->port;
535 unsigned int size;
536 unsigned long flags;
537
538 spin_lock_irqsave(&port->lock, flags);
539 size = stm32_usart_receive_chars(port, false);
540 uart_unlock_and_check_sysrq_irqrestore(port, flags);
541 if (size)
542 tty_flip_buffer_push(tport);
543 }
544
545 static int stm32_usart_rx_dma_start_or_resume(struct uart_port *port)
546 {
547 struct stm32_port *stm32_port = to_stm32_port(port);
548 struct dma_async_tx_descriptor *desc;
549 enum dma_status rx_dma_status;
550 int ret;
551
552 if (stm32_port->throttled)
553 return 0;
554
555 if (stm32_port->rx_dma_busy) {
556 rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
557 stm32_port->rx_ch->cookie,
558 NULL);
559 if (rx_dma_status == DMA_IN_PROGRESS)
560 return 0;
561
562 if (rx_dma_status == DMA_PAUSED && !stm32_usart_rx_dma_resume(stm32_port))
563 return 0;
564
565 dev_err(port->dev, "DMA failed: status error.\n");
566 stm32_usart_rx_dma_terminate(stm32_port);
567 }
568
569 stm32_port->rx_dma_busy = true;
570
571 stm32_port->last_res = RX_BUF_L;
572 /* Prepare a DMA cyclic transaction */
573 desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch,
574 stm32_port->rx_dma_buf,
575 RX_BUF_L, RX_BUF_P,
576 DMA_DEV_TO_MEM,
577 DMA_PREP_INTERRUPT);
578 if (!desc) {
579 dev_err(port->dev, "rx dma prep cyclic failed\n");
580 stm32_port->rx_dma_busy = false;
581 return -ENODEV;
582 }
583
584 desc->callback = stm32_usart_rx_dma_complete;
585 desc->callback_param = port;
586
587 /* Push current DMA transaction in the pending queue */
588 ret = dma_submit_error(dmaengine_submit(desc));
589 if (ret) {
590 dmaengine_terminate_sync(stm32_port->rx_ch);
591 stm32_port->rx_dma_busy = false;
592 return ret;
593 }
594
595 /* Issue pending DMA requests */
596 dma_async_issue_pending(stm32_port->rx_ch);
597
598 return 0;
599 }
600
601 static void stm32_usart_tx_dma_terminate(struct stm32_port *stm32_port)
602 {
603 dmaengine_terminate_async(stm32_port->tx_ch);
604 stm32_port->tx_dma_busy = false;
605 }
606
607 static bool stm32_usart_tx_dma_started(struct stm32_port *stm32_port)
608 {
609 /*
610 * We cannot use the function "dmaengine_tx_status" to know the
611 * status of DMA. This function does not show if the "dma complete"
612 * callback of the DMA transaction has been called. So we prefer
613 * to use the "tx_dma_busy" flag to prevent two DMA transactions from
614 * running at the same time.
615 */
616 return stm32_port->tx_dma_busy;
617 }
618
619 static int stm32_usart_tx_dma_pause(struct stm32_port *stm32_port)
620 {
621 return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch,
622 DMA_IN_PROGRESS, dmaengine_pause,
623 stm32_usart_tx_dma_started,
624 stm32_usart_tx_dma_terminate);
625 }
626
627 static int stm32_usart_tx_dma_resume(struct stm32_port *stm32_port)
628 {
629 return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch,
630 DMA_PAUSED, dmaengine_resume,
631 stm32_usart_tx_dma_started,
632 stm32_usart_tx_dma_terminate);
633 }
634
635 static void stm32_usart_tx_dma_complete(void *arg)
636 {
637 struct uart_port *port = arg;
638 struct stm32_port *stm32port = to_stm32_port(port);
639 unsigned long flags;
640
641 stm32_usart_tx_dma_terminate(stm32port);
642
643 /* Let's see if we have pending data to send */
644 spin_lock_irqsave(&port->lock, flags);
645 stm32_usart_transmit_chars(port);
646 spin_unlock_irqrestore(&port->lock, flags);
647 }
648
649 static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
650 {
651 struct stm32_port *stm32_port = to_stm32_port(port);
652 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
653
654 /*
655 * Enables TX FIFO threshold irq when FIFO is enabled,
656 * or TX empty irq when FIFO is disabled
657 */
658 if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
659 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE);
660 else
661 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
662 }
663
664 static void stm32_usart_tc_interrupt_enable(struct uart_port *port)
665 {
666 struct stm32_port *stm32_port = to_stm32_port(port);
667 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
668
669 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TCIE);
670 }
671
672 static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
673 {
674 struct stm32_port *stm32_port = to_stm32_port(port);
675 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
676
677 if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
678 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE);
679 else
680 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
681 }
682
683 static void stm32_usart_tc_interrupt_disable(struct uart_port *port)
684 {
685 struct stm32_port *stm32_port = to_stm32_port(port);
686 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
687
688 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE);
689 }
690
691 static void stm32_usart_transmit_chars_pio(struct uart_port *port)
692 {
693 struct stm32_port *stm32_port = to_stm32_port(port);
694 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
695 struct circ_buf *xmit = &port->state->xmit;
696
697 while (!uart_circ_empty(xmit)) {
698 /* Check that TDR is empty before filling FIFO */
699 if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
700 break;
701 writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr);
702 uart_xmit_advance(port, 1);
703 }
704
705 /* rely on TXE irq (mask or unmask) for sending remaining data */
706 if (uart_circ_empty(xmit))
707 stm32_usart_tx_interrupt_disable(port);
708 else
709 stm32_usart_tx_interrupt_enable(port);
710 }
711
712 static void stm32_usart_transmit_chars_dma(struct uart_port *port)
713 {
714 struct stm32_port *stm32port = to_stm32_port(port);
715 struct circ_buf *xmit = &port->state->xmit;
716 struct dma_async_tx_descriptor *desc = NULL;
717 unsigned int count;
718 int ret;
719
720 if (stm32_usart_tx_dma_started(stm32port)) {
721 ret = stm32_usart_tx_dma_resume(stm32port);
722 if (ret < 0 && ret != -EAGAIN)
723 goto fallback_err;
724 return;
725 }
726
727 count = uart_circ_chars_pending(xmit);
728
729 if (count > TX_BUF_L)
730 count = TX_BUF_L;
731
732 if (xmit->tail < xmit->head) {
733 memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count);
734 } else {
735 size_t one = UART_XMIT_SIZE - xmit->tail;
736 size_t two;
737
738 if (one > count)
739 one = count;
740 two = count - one;
741
742 memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one);
743 if (two)
744 memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two);
745 }
746
747 desc = dmaengine_prep_slave_single(stm32port->tx_ch,
748 stm32port->tx_dma_buf,
749 count,
750 DMA_MEM_TO_DEV,
751 DMA_PREP_INTERRUPT);
752
753 if (!desc)
754 goto fallback_err;
755
756 /*
757 * Set the "tx_dma_busy" flag. It is cleared when
758 * dmaengine_terminate_async() is called, and it keeps
759 * transmit_chars_dma from starting another DMA transaction
760 * before the callback of the previous one has run.
761 */
762 stm32port->tx_dma_busy = true;
763
764 desc->callback = stm32_usart_tx_dma_complete;
765 desc->callback_param = port;
766
767 /* Push current DMA TX transaction in the pending queue */
768 /* DMA not yet started, safe to free resources */
769 ret = dma_submit_error(dmaengine_submit(desc));
770 if (ret) {
771 dev_err(port->dev, "DMA failed with error code: %d\n", ret);
772 stm32_usart_tx_dma_terminate(stm32port);
773 goto fallback_err;
774 }
775
776 /* Issue pending DMA TX requests */
777 dma_async_issue_pending(stm32port->tx_ch);
778
779 uart_xmit_advance(port, count);
780
781 return;
782
783 fallback_err:
784 stm32_usart_transmit_chars_pio(port);
785 }
786
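/*
 * RS485 without hardware flow control is driven in software: RTS (the DE
 * line) is raised below before data goes out, and once the ring buffer is
 * empty the TC interrupt is enabled so that stm32_usart_interrupt() can drop
 * RTS again when the last frame has fully left the shift register.
 */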
787 static void stm32_usart_transmit_chars(struct uart_port *port)
788 {
789 struct stm32_port *stm32_port = to_stm32_port(port);
790 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
791 struct circ_buf *xmit = &port->state->xmit;
792 u32 isr;
793 int ret;
794
795 if (!stm32_port->hw_flow_control &&
796 port->rs485.flags & SER_RS485_ENABLED &&
797 (port->x_char ||
798 !(uart_circ_empty(xmit) || uart_tx_stopped(port)))) {
799 stm32_usart_tc_interrupt_disable(port);
800 stm32_usart_rs485_rts_enable(port);
801 }
802
803 if (port->x_char) {
804 /* dma terminate may have been called in case of dma pause failure */
805 stm32_usart_tx_dma_pause(stm32_port);
806
807 /* Check that TDR is empty before filling FIFO */
808 ret =
809 readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
810 isr,
811 (isr & USART_SR_TXE),
812 10, 1000);
813 if (ret)
814 dev_warn(port->dev, "1 character may be erased\n");
815
816 writel_relaxed(port->x_char, port->membase + ofs->tdr);
817 port->x_char = 0;
818 port->icount.tx++;
819
820 /* dma terminate may have been called in case of dma resume failure */
821 stm32_usart_tx_dma_resume(stm32_port);
822 return;
823 }
824
825 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
826 stm32_usart_tx_interrupt_disable(port);
827 return;
828 }
829
830 if (ofs->icr == UNDEF_REG)
831 stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC);
832 else
833 writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);
834
835 if (stm32_port->tx_ch)
836 stm32_usart_transmit_chars_dma(port);
837 else
838 stm32_usart_transmit_chars_pio(port);
839
840 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
841 uart_write_wakeup(port);
842
843 if (uart_circ_empty(xmit)) {
844 stm32_usart_tx_interrupt_disable(port);
845 if (!stm32_port->hw_flow_control &&
846 port->rs485.flags & SER_RS485_ENABLED) {
847 stm32_usart_tc_interrupt_enable(port);
848 }
849 }
850 }
851
852 static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
853 {
854 struct uart_port *port = ptr;
855 struct tty_port *tport = &port->state->port;
856 struct stm32_port *stm32_port = to_stm32_port(port);
857 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
858 u32 sr;
859 unsigned int size;
860
861 sr = readl_relaxed(port->membase + ofs->isr);
862
863 if (!stm32_port->hw_flow_control &&
864 port->rs485.flags & SER_RS485_ENABLED &&
865 (sr & USART_SR_TC)) {
866 stm32_usart_tc_interrupt_disable(port);
867 stm32_usart_rs485_rts_disable(port);
868 }
869
870 if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
871 writel_relaxed(USART_ICR_RTOCF,
872 port->membase + ofs->icr);
873
874 if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
875 /* Clear wake up flag and disable wake up interrupt */
876 writel_relaxed(USART_ICR_WUCF,
877 port->membase + ofs->icr);
878 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
879 if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
880 pm_wakeup_event(tport->tty->dev, 0);
881 }
882
883 /*
884 * RX errors in DMA mode have to be handled ASAP to avoid overrun, as the DMA request
885 * line has been masked by HW and RX data are stacking up in the FIFO.
886 */
887 if (!stm32_port->throttled) {
888 if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_started(stm32_port)) ||
889 ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_started(stm32_port))) {
890 spin_lock(&port->lock);
891 size = stm32_usart_receive_chars(port, false);
892 uart_unlock_and_check_sysrq(port);
893 if (size)
894 tty_flip_buffer_push(tport);
895 }
896 }
897
898 if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
899 spin_lock(&port->lock);
900 stm32_usart_transmit_chars(port);
901 spin_unlock(&port->lock);
902 }
903
904 /* Receiver timeout irq for DMA RX */
905 if (stm32_usart_rx_dma_started(stm32_port) && !stm32_port->throttled) {
906 spin_lock(&port->lock);
907 size = stm32_usart_receive_chars(port, false);
908 uart_unlock_and_check_sysrq(port);
909 if (size)
910 tty_flip_buffer_push(tport);
911 }
912
913 return IRQ_HANDLED;
914 }
915
916 static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
917 {
918 struct stm32_port *stm32_port = to_stm32_port(port);
919 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
920
921 if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
922 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE);
923 else
924 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE);
925
926 mctrl_gpio_set(stm32_port->gpios, mctrl);
927 }
928
929 static unsigned int stm32_usart_get_mctrl(struct uart_port *port)
930 {
931 struct stm32_port *stm32_port = to_stm32_port(port);
932 unsigned int ret;
933
934 /* This routine is used to get signals of: DCD, DSR, RI, and CTS */
935 ret = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
936
937 return mctrl_gpio_get(stm32_port->gpios, &ret);
938 }
939
940 static void stm32_usart_enable_ms(struct uart_port *port)
941 {
942 mctrl_gpio_enable_ms(to_stm32_port(port)->gpios);
943 }
944
945 static void stm32_usart_disable_ms(struct uart_port *port)
946 {
947 mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
948 }
949
950 /* Transmit stop */
951 static void stm32_usart_stop_tx(struct uart_port *port)
952 {
953 struct stm32_port *stm32_port = to_stm32_port(port);
954
955 stm32_usart_tx_interrupt_disable(port);
956
957 /* dma terminate may have been called in case of dma pause failure */
958 stm32_usart_tx_dma_pause(stm32_port);
959
960 stm32_usart_rs485_rts_disable(port);
961 }
962
963 /* There are probably characters waiting to be transmitted. */
964 static void stm32_usart_start_tx(struct uart_port *port)
965 {
966 struct circ_buf *xmit = &port->state->xmit;
967
968 if (uart_circ_empty(xmit) && !port->x_char) {
969 stm32_usart_rs485_rts_disable(port);
970 return;
971 }
972
973 stm32_usart_rs485_rts_enable(port);
974
975 stm32_usart_transmit_chars(port);
976 }
977
978 /* Flush the transmit buffer. */
979 static void stm32_usart_flush_buffer(struct uart_port *port)
980 {
981 struct stm32_port *stm32_port = to_stm32_port(port);
982
983 if (stm32_port->tx_ch)
984 stm32_usart_tx_dma_terminate(stm32_port);
985 }
986
987 /* Throttle the remote when input buffer is about to overflow. */
988 static void stm32_usart_throttle(struct uart_port *port)
989 {
990 struct stm32_port *stm32_port = to_stm32_port(port);
991 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
992 unsigned long flags;
993
994 spin_lock_irqsave(&port->lock, flags);
995
996 /*
997 * Pause DMA transfer, so the RX data gets queued into the FIFO.
998 * Hardware flow control is triggered when RX FIFO is full.
999 */
1000 stm32_usart_rx_dma_pause(stm32_port);
1001
1002 stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
1003 if (stm32_port->cr3_irq)
1004 stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
1005
1006 stm32_port->throttled = true;
1007 spin_unlock_irqrestore(&port->lock, flags);
1008 }
1009
1010 /* Unthrottle the remote, the input buffer can now accept data. */
1011 static void stm32_usart_unthrottle(struct uart_port *port)
1012 {
1013 struct stm32_port *stm32_port = to_stm32_port(port);
1014 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1015 unsigned long flags;
1016
1017 spin_lock_irqsave(&port->lock, flags);
1018 stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
1019 if (stm32_port->cr3_irq)
1020 stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);
1021
1022 stm32_port->throttled = false;
1023
1024 /*
1025 * Switch back to DMA mode (resume DMA).
1026 * Hardware flow control is stopped when FIFO is not full any more.
1027 */
1028 if (stm32_port->rx_ch)
1029 stm32_usart_rx_dma_start_or_resume(port);
1030
1031 spin_unlock_irqrestore(&port->lock, flags);
1032 }
1033
1034 /* Receive stop */
1035 static void stm32_usart_stop_rx(struct uart_port *port)
1036 {
1037 struct stm32_port *stm32_port = to_stm32_port(port);
1038 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1039
1040 /* Disable DMA request line. */
1041 stm32_usart_rx_dma_pause(stm32_port);
1042
1043 stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
1044 if (stm32_port->cr3_irq)
1045 stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
1046 }
1047
1048 /* Handle breaks - ignored by us */
1049 static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
1050 {
1051 }
1052
1053 static int stm32_usart_startup(struct uart_port *port)
1054 {
1055 struct stm32_port *stm32_port = to_stm32_port(port);
1056 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1057 const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
1058 const char *name = to_platform_device(port->dev)->name;
1059 u32 val;
1060 int ret;
1061
1062 ret = request_irq(port->irq, stm32_usart_interrupt,
1063 IRQF_NO_SUSPEND, name, port);
1064 if (ret)
1065 return ret;
1066
1067 if (stm32_port->swap) {
1068 val = readl_relaxed(port->membase + ofs->cr2);
1069 val |= USART_CR2_SWAP;
1070 writel_relaxed(val, port->membase + ofs->cr2);
1071 }
1072
1073 /* RX FIFO Flush */
1074 if (ofs->rqr != UNDEF_REG)
1075 writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);
1076
1077 if (stm32_port->rx_ch) {
1078 ret = stm32_usart_rx_dma_start_or_resume(port);
1079 if (ret) {
1080 free_irq(port->irq, port);
1081 return ret;
1082 }
1083 }
1084
1085 /* RX enabling */
1086 val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit);
1087 stm32_usart_set_bits(port, ofs->cr1, val);
1088
1089 return 0;
1090 }
1091
1092 static void stm32_usart_shutdown(struct uart_port *port)
1093 {
1094 struct stm32_port *stm32_port = to_stm32_port(port);
1095 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1096 const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
1097 u32 val, isr;
1098 int ret;
1099
1100 if (stm32_usart_tx_dma_started(stm32_port))
1101 stm32_usart_tx_dma_terminate(stm32_port);
1102
1103 if (stm32_port->tx_ch)
1104 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
1105
1106 /* Disable modem control interrupts */
1107 stm32_usart_disable_ms(port);
1108
1109 val = USART_CR1_TXEIE | USART_CR1_TE;
1110 val |= stm32_port->cr1_irq | USART_CR1_RE;
1111 val |= BIT(cfg->uart_enable_bit);
1112 if (stm32_port->fifoen)
1113 val |= USART_CR1_FIFOEN;
1114
1115 ret = readl_relaxed_poll_timeout(port->membase + ofs->isr,
1116 isr, (isr & USART_SR_TC),
1117 10, 100000);
1118
1119 /* Send the TC error message only when ISR_TC is not set */
1120 if (ret)
1121 dev_err(port->dev, "Transmission is not complete\n");
1122
1123 /* Disable RX DMA. */
1124 if (stm32_port->rx_ch) {
1125 stm32_usart_rx_dma_terminate(stm32_port);
1126 dmaengine_synchronize(stm32_port->rx_ch);
1127 }
1128
1129 /* flush RX & TX FIFO */
1130 if (ofs->rqr != UNDEF_REG)
1131 writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
1132 port->membase + ofs->rqr);
1133
1134 stm32_usart_clr_bits(port, ofs->cr1, val);
1135
1136 free_irq(port->irq, port);
1137 }
1138
1139 static void stm32_usart_set_termios(struct uart_port *port,
1140 struct ktermios *termios,
1141 const struct ktermios *old)
1142 {
1143 struct stm32_port *stm32_port = to_stm32_port(port);
1144 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1145 const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
1146 struct serial_rs485 *rs485conf = &port->rs485;
1147 unsigned int baud, bits;
1148 u32 usartdiv, mantissa, fraction, oversampling;
1149 tcflag_t cflag = termios->c_cflag;
1150 u32 cr1, cr2, cr3, isr;
1151 unsigned long flags;
1152 int ret;
1153
1154 if (!stm32_port->hw_flow_control)
1155 cflag &= ~CRTSCTS;
1156
1157 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8);
1158
1159 spin_lock_irqsave(&port->lock, flags);
1160
1161 ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
1162 isr,
1163 (isr & USART_SR_TC),
1164 10, 100000);
1165
1166 /* Send the TC error message only when ISR_TC is not set. */
1167 if (ret)
1168 dev_err(port->dev, "Transmission is not complete\n");
1169
1170 /* Stop serial port and reset value */
1171 writel_relaxed(0, port->membase + ofs->cr1);
1172
1173 /* flush RX & TX FIFO */
1174 if (ofs->rqr != UNDEF_REG)
1175 writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
1176 port->membase + ofs->rqr);
1177
1178 cr1 = USART_CR1_TE | USART_CR1_RE;
1179 if (stm32_port->fifoen)
1180 cr1 |= USART_CR1_FIFOEN;
1181 cr2 = stm32_port->swap ? USART_CR2_SWAP : 0;
1182
1183 /* Tx and RX FIFO configuration */
1184 cr3 = readl_relaxed(port->membase + ofs->cr3);
1185 cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE;
1186 if (stm32_port->fifoen) {
1187 if (stm32_port->txftcfg >= 0)
1188 cr3 |= stm32_port->txftcfg << USART_CR3_TXFTCFG_SHIFT;
1189 if (stm32_port->rxftcfg >= 0)
1190 cr3 |= stm32_port->rxftcfg << USART_CR3_RXFTCFG_SHIFT;
1191 }
1192
1193 if (cflag & CSTOPB)
1194 cr2 |= USART_CR2_STOP_2B;
1195
1196 bits = tty_get_char_size(cflag);
1197 stm32_port->rdr_mask = (BIT(bits) - 1);
1198
1199 if (cflag & PARENB) {
1200 bits++;
1201 cr1 |= USART_CR1_PCE;
1202 }
1203
1204 /*
1205 * Word length configuration:
1206 * CS8 + parity, 9 bits word aka [M1:M0] = 0b01
1207 * CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10
1208 * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
1209 * M0 and M1 already cleared by cr1 initialization.
1210 */
1211 if (bits == 9) {
1212 cr1 |= USART_CR1_M0;
1213 } else if ((bits == 7) && cfg->has_7bits_data) {
1214 cr1 |= USART_CR1_M1;
1215 } else if (bits != 8) {
1216 dev_dbg(port->dev, "Unsupported data bits config: %u bits\n"
1217 , bits);
1218 cflag &= ~CSIZE;
1219 cflag |= CS8;
1220 termios->c_cflag = cflag;
1221 bits = 8;
1222 if (cflag & PARENB) {
1223 bits++;
1224 cr1 |= USART_CR1_M0;
1225 }
1226 }
1227
1228 if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
1229 (stm32_port->fifoen &&
1230 stm32_port->rxftcfg >= 0))) {
1231 if (cflag & CSTOPB)
1232 bits = bits + 3; /* 1 start bit + 2 stop bits */
1233 else
1234 bits = bits + 2; /* 1 start bit + 1 stop bit */
1235
1236 /* RX timeout irq to occur after last stop bit + bits */
1237 stm32_port->cr1_irq = USART_CR1_RTOIE;
1238 writel_relaxed(bits, port->membase + ofs->rtor);
1239 cr2 |= USART_CR2_RTOEN;
1240 /*
1241 * Enable the FIFO threshold irq in two cases: either when there is no DMA, or
1242 * when waking up over USART from low power, until the DMA gets re-enabled by resume.
1243 */
1244 stm32_port->cr3_irq = USART_CR3_RXFTIE;
1245 }
1246
1247 cr1 |= stm32_port->cr1_irq;
1248 cr3 |= stm32_port->cr3_irq;
1249
1250 if (cflag & PARODD)
1251 cr1 |= USART_CR1_PS;
1252
1253 port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
1254 if (cflag & CRTSCTS) {
1255 port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
1256 cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
1257 }
1258
1259 usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
1260
1261 /*
1262 * The USART supports 16 or 8 times oversampling.
1263 * By default we prefer 16 times oversampling, so that the receiver
1264 * has a better tolerance to clock deviations.
1265 * 8 times oversampling is only used to achieve higher speeds.
1266 */
1267 if (usartdiv < 16) {
1268 oversampling = 8;
1269 cr1 |= USART_CR1_OVER8;
1270 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
1271 } else {
1272 oversampling = 16;
1273 cr1 &= ~USART_CR1_OVER8;
1274 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
1275 }
1276
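/*
 * Illustrative example (hypothetical clock rate, not from the original
 * source): with uartclk = 64 MHz and baud = 115200, usartdiv is 556. At 16x
 * oversampling the mantissa is 556 / 16 = 34 and the fraction 556 % 16 = 12,
 * giving an effective rate of 64 MHz / 556, i.e. roughly 115108 baud.
 */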
1277 mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
1278 fraction = usartdiv % oversampling;
1279 writel_relaxed(mantissa | fraction, port->membase + ofs->brr);
1280
1281 uart_update_timeout(port, cflag, baud);
1282
1283 port->read_status_mask = USART_SR_ORE;
1284 if (termios->c_iflag & INPCK)
1285 port->read_status_mask |= USART_SR_PE | USART_SR_FE;
1286 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
1287 port->read_status_mask |= USART_SR_FE;
1288
1289 /* Characters to ignore */
1290 port->ignore_status_mask = 0;
1291 if (termios->c_iflag & IGNPAR)
1292 port->ignore_status_mask = USART_SR_PE | USART_SR_FE;
1293 if (termios->c_iflag & IGNBRK) {
1294 port->ignore_status_mask |= USART_SR_FE;
1295 /*
1296 * If we're ignoring parity and break indicators,
1297 * ignore overruns too (for real raw support).
1298 */
1299 if (termios->c_iflag & IGNPAR)
1300 port->ignore_status_mask |= USART_SR_ORE;
1301 }
1302
1303 /* Ignore all characters if CREAD is not set */
1304 if ((termios->c_cflag & CREAD) == 0)
1305 port->ignore_status_mask |= USART_SR_DUMMY_RX;
1306
1307 if (stm32_port->rx_ch) {
1308 /*
1309 * Setup DMA to collect only valid data and enable error irqs.
1310 * This also enables break reception when using DMA.
1311 */
1312 cr1 |= USART_CR1_PEIE;
1313 cr3 |= USART_CR3_EIE;
1314 cr3 |= USART_CR3_DMAR;
1315 cr3 |= USART_CR3_DDRE;
1316 }
1317
1318 if (stm32_port->tx_ch)
1319 cr3 |= USART_CR3_DMAT;
1320
1321 if (rs485conf->flags & SER_RS485_ENABLED) {
1322 stm32_usart_config_reg_rs485(&cr1, &cr3,
1323 rs485conf->delay_rts_before_send,
1324 rs485conf->delay_rts_after_send,
1325 baud);
1326 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
1327 cr3 &= ~USART_CR3_DEP;
1328 rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
1329 } else {
1330 cr3 |= USART_CR3_DEP;
1331 rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
1332 }
1333
1334 } else {
1335 cr3 &= ~(USART_CR3_DEM | USART_CR3_DEP);
1336 cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
1337 }
1338
1339 /* Configure wake up from low power on start bit detection */
1340 if (stm32_port->wakeup_src) {
1341 cr3 &= ~USART_CR3_WUS_MASK;
1342 cr3 |= USART_CR3_WUS_START_BIT;
1343 }
1344
1345 writel_relaxed(cr3, port->membase + ofs->cr3);
1346 writel_relaxed(cr2, port->membase + ofs->cr2);
1347 writel_relaxed(cr1, port->membase + ofs->cr1);
1348
1349 stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
1350 spin_unlock_irqrestore(&port->lock, flags);
1351
1352 /* Handle modem control interrupts */
1353 if (UART_ENABLE_MS(port, termios->c_cflag))
1354 stm32_usart_enable_ms(port);
1355 else
1356 stm32_usart_disable_ms(port);
1357 }
1358
1359 static const char *stm32_usart_type(struct uart_port *port)
1360 {
1361 return (port->type == PORT_STM32) ? DRIVER_NAME : NULL;
1362 }
1363
1364 static void stm32_usart_release_port(struct uart_port *port)
1365 {
1366 }
1367
1368 static int stm32_usart_request_port(struct uart_port *port)
1369 {
1370 return 0;
1371 }
1372
1373 static void stm32_usart_config_port(struct uart_port *port, int flags)
1374 {
1375 if (flags & UART_CONFIG_TYPE)
1376 port->type = PORT_STM32;
1377 }
1378
1379 static int
1380 stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser)
1381 {
1382 /* No user changeable parameters */
1383 return -EINVAL;
1384 }
1385
1386 static void stm32_usart_pm(struct uart_port *port, unsigned int state,
1387 unsigned int oldstate)
1388 {
1389 struct stm32_port *stm32port = container_of(port,
1390 struct stm32_port, port);
1391 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
1392 const struct stm32_usart_config *cfg = &stm32port->info->cfg;
1393 unsigned long flags;
1394
1395 switch (state) {
1396 case UART_PM_STATE_ON:
1397 pm_runtime_get_sync(port->dev);
1398 break;
1399 case UART_PM_STATE_OFF:
1400 spin_lock_irqsave(&port->lock, flags);
1401 stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
1402 spin_unlock_irqrestore(&port->lock, flags);
1403 pm_runtime_put_sync(port->dev);
1404 break;
1405 }
1406 }
1407
1408 #if defined(CONFIG_CONSOLE_POLL)
1409
1410 /* Callbacks for characters polling in debug context (i.e. KGDB). */
1411 static int stm32_usart_poll_init(struct uart_port *port)
1412 {
1413 struct stm32_port *stm32_port = to_stm32_port(port);
1414
1415 return clk_prepare_enable(stm32_port->clk);
1416 }
1417
1418 static int stm32_usart_poll_get_char(struct uart_port *port)
1419 {
1420 struct stm32_port *stm32_port = to_stm32_port(port);
1421 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1422
1423 if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_RXNE))
1424 return NO_POLL_CHAR;
1425
1426 return readl_relaxed(port->membase + ofs->rdr) & stm32_port->rdr_mask;
1427 }
1428
1429 static void stm32_usart_poll_put_char(struct uart_port *port, unsigned char ch)
1430 {
1431 stm32_usart_console_putchar(port, ch);
1432 }
1433 #endif /* CONFIG_CONSOLE_POLL */
1434
1435 static const struct uart_ops stm32_uart_ops = {
1436 .tx_empty = stm32_usart_tx_empty,
1437 .set_mctrl = stm32_usart_set_mctrl,
1438 .get_mctrl = stm32_usart_get_mctrl,
1439 .stop_tx = stm32_usart_stop_tx,
1440 .start_tx = stm32_usart_start_tx,
1441 .throttle = stm32_usart_throttle,
1442 .unthrottle = stm32_usart_unthrottle,
1443 .stop_rx = stm32_usart_stop_rx,
1444 .enable_ms = stm32_usart_enable_ms,
1445 .break_ctl = stm32_usart_break_ctl,
1446 .startup = stm32_usart_startup,
1447 .shutdown = stm32_usart_shutdown,
1448 .flush_buffer = stm32_usart_flush_buffer,
1449 .set_termios = stm32_usart_set_termios,
1450 .pm = stm32_usart_pm,
1451 .type = stm32_usart_type,
1452 .release_port = stm32_usart_release_port,
1453 .request_port = stm32_usart_request_port,
1454 .config_port = stm32_usart_config_port,
1455 .verify_port = stm32_usart_verify_port,
1456 #if defined(CONFIG_CONSOLE_POLL)
1457 .poll_init = stm32_usart_poll_init,
1458 .poll_get_char = stm32_usart_poll_get_char,
1459 .poll_put_char = stm32_usart_poll_put_char,
1460 #endif /* CONFIG_CONSOLE_POLL */
1461 };
1462
1463 /*
1464 * STM32H7 RX & TX FIFO threshold configuration (CR3 RXFTCFG / TXFTCFG)
1465 * Note: 1 isn't a valid value in RXFTCFG / TXFTCFG. In this case,
1466 * RXNEIE / TXEIE can be used instead of threshold irqs: RXFTIE / TXFTIE.
1467 * So, the array index equals the RXFTCFG / TXFTCFG bitfield value + 1.
1468 */
1469 static const u32 stm32h7_usart_fifo_thresh_cfg[] = { 1, 2, 4, 8, 12, 14, 16 };
1470
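/*
 * Example of the mapping done by stm32_usart_get_ftcfg() below: a DT
 * threshold of 8 bytes matches stm32h7_usart_fifo_thresh_cfg[3], so the
 * programmed RXFTCFG/TXFTCFG value is 3 - 1 = 2, while a 1-byte request
 * hits index 0 and returns -EINVAL (threshold irq unused).
 */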
1471 static void stm32_usart_get_ftcfg(struct platform_device *pdev, const char *p,
1472 int *ftcfg)
1473 {
1474 u32 bytes, i;
1475
1476 /* DT option to get RX & TX FIFO threshold (default to 8 bytes) */
1477 if (of_property_read_u32(pdev->dev.of_node, p, &bytes))
1478 bytes = 8;
1479
1480 for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++)
1481 if (stm32h7_usart_fifo_thresh_cfg[i] >= bytes)
1482 break;
1483 if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg))
1484 i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1;
1485
1486 dev_dbg(&pdev->dev, "%s set to %d bytes\n", p,
1487 stm32h7_usart_fifo_thresh_cfg[i]);
1488
1489 /* Provide FIFO threshold ftcfg (1 is invalid: threshold irq unused) */
1490 if (i)
1491 *ftcfg = i - 1;
1492 else
1493 *ftcfg = -EINVAL;
1494 }
1495
1496 static void stm32_usart_deinit_port(struct stm32_port *stm32port)
1497 {
1498 clk_disable_unprepare(stm32port->clk);
1499 }
1500
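/*
 * Note (relying on the serial core convention, not on anything in this
 * file): non-zero delay fields in this "supported" descriptor tell the core
 * that delay_rts_before_send / delay_rts_after_send are honoured by the
 * driver.
 */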
1501 static const struct serial_rs485 stm32_rs485_supported = {
1502 .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
1503 SER_RS485_RX_DURING_TX,
1504 .delay_rts_before_send = 1,
1505 .delay_rts_after_send = 1,
1506 };
1507
1508 static int stm32_usart_init_port(struct stm32_port *stm32port,
1509 struct platform_device *pdev)
1510 {
1511 struct uart_port *port = &stm32port->port;
1512 struct resource *res;
1513 int ret, irq;
1514
1515 irq = platform_get_irq(pdev, 0);
1516 if (irq < 0)
1517 return irq;
1518
1519 port->iotype = UPIO_MEM;
1520 port->flags = UPF_BOOT_AUTOCONF;
1521 port->ops = &stm32_uart_ops;
1522 port->dev = &pdev->dev;
1523 port->fifosize = stm32port->info->cfg.fifosize;
1524 port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
1525 port->irq = irq;
1526 port->rs485_config = stm32_usart_config_rs485;
1527 port->rs485_supported = stm32_rs485_supported;
1528
1529 ret = stm32_usart_init_rs485(port, pdev);
1530 if (ret)
1531 return ret;
1532
1533 stm32port->wakeup_src = stm32port->info->cfg.has_wakeup &&
1534 of_property_read_bool(pdev->dev.of_node, "wakeup-source");
1535
1536 stm32port->swap = stm32port->info->cfg.has_swap &&
1537 of_property_read_bool(pdev->dev.of_node, "rx-tx-swap");
1538
1539 stm32port->fifoen = stm32port->info->cfg.has_fifo;
1540 if (stm32port->fifoen) {
1541 stm32_usart_get_ftcfg(pdev, "rx-threshold",
1542 &stm32port->rxftcfg);
1543 stm32_usart_get_ftcfg(pdev, "tx-threshold",
1544 &stm32port->txftcfg);
1545 }
1546
1547 port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
1548 if (IS_ERR(port->membase))
1549 return PTR_ERR(port->membase);
1550 port->mapbase = res->start;
1551
1552 spin_lock_init(&port->lock);
1553
1554 stm32port->clk = devm_clk_get(&pdev->dev, NULL);
1555 if (IS_ERR(stm32port->clk))
1556 return PTR_ERR(stm32port->clk);
1557
1558 /* Ensure that clk rate is correct by enabling the clk */
1559 ret = clk_prepare_enable(stm32port->clk);
1560 if (ret)
1561 return ret;
1562
1563 stm32port->port.uartclk = clk_get_rate(stm32port->clk);
1564 if (!stm32port->port.uartclk) {
1565 ret = -EINVAL;
1566 goto err_clk;
1567 }
1568
1569 stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0);
1570 if (IS_ERR(stm32port->gpios)) {
1571 ret = PTR_ERR(stm32port->gpios);
1572 goto err_clk;
1573 }
1574
1575 /*
1576 * CTS/RTS GPIOs and the "st,hw-flow-ctrl" (deprecated) or "uart-has-rtscts"
1577 * properties must not be specified at the same time.
1578 */
1579 if (stm32port->hw_flow_control) {
1580 if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) ||
1581 mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) {
1582 dev_err(&pdev->dev, "Conflicting RTS/CTS config\n");
1583 ret = -EINVAL;
1584 goto err_clk;
1585 }
1586 }
1587
1588 return ret;
1589
1590 err_clk:
1591 clk_disable_unprepare(stm32port->clk);
1592
1593 return ret;
1594 }
1595
1596 static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev)
1597 {
1598 struct device_node *np = pdev->dev.of_node;
1599 int id;
1600
1601 if (!np)
1602 return NULL;
1603
1604 id = of_alias_get_id(np, "serial");
1605 if (id < 0) {
1606 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", id);
1607 return NULL;
1608 }
1609
1610 if (WARN_ON(id >= STM32_MAX_PORTS))
1611 return NULL;
1612
1613 stm32_ports[id].hw_flow_control =
1614 of_property_read_bool(np, "st,hw-flow-ctrl") /*deprecated*/ ||
1615 of_property_read_bool(np, "uart-has-rtscts");
1616 stm32_ports[id].port.line = id;
1617 stm32_ports[id].cr1_irq = USART_CR1_RXNEIE;
1618 stm32_ports[id].cr3_irq = 0;
1619 stm32_ports[id].last_res = RX_BUF_L;
1620 return &stm32_ports[id];
1621 }
1622
1623 #ifdef CONFIG_OF
1624 static const struct of_device_id stm32_match[] = {
1625 { .compatible = "st,stm32-uart", .data = &stm32f4_info},
1626 { .compatible = "st,stm32f7-uart", .data = &stm32f7_info},
1627 { .compatible = "st,stm32h7-uart", .data = &stm32h7_info},
1628 {},
1629 };
1630
1631 MODULE_DEVICE_TABLE(of, stm32_match);
1632 #endif
1633
1634 static void stm32_usart_of_dma_rx_remove(struct stm32_port *stm32port,
1635 struct platform_device *pdev)
1636 {
1637 if (stm32port->rx_buf)
1638 dma_free_coherent(&pdev->dev, RX_BUF_L, stm32port->rx_buf,
1639 stm32port->rx_dma_buf);
1640 }
1641
1642 static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
1643 struct platform_device *pdev)
1644 {
1645 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
1646 struct uart_port *port = &stm32port->port;
1647 struct device *dev = &pdev->dev;
1648 struct dma_slave_config config;
1649 int ret;
1650
1651 stm32port->rx_buf = dma_alloc_coherent(dev, RX_BUF_L,
1652 &stm32port->rx_dma_buf,
1653 GFP_KERNEL);
1654 if (!stm32port->rx_buf)
1655 return -ENOMEM;
1656
1657 /* Configure DMA channel */
1658 memset(&config, 0, sizeof(config));
1659 config.src_addr = port->mapbase + ofs->rdr;
1660 config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1661
1662 ret = dmaengine_slave_config(stm32port->rx_ch, &config);
1663 if (ret < 0) {
1664 dev_err(dev, "rx dma channel config failed\n");
1665 stm32_usart_of_dma_rx_remove(stm32port, pdev);
1666 return ret;
1667 }
1668
1669 return 0;
1670 }
1671
1672 static void stm32_usart_of_dma_tx_remove(struct stm32_port *stm32port,
1673 struct platform_device *pdev)
1674 {
1675 if (stm32port->tx_buf)
1676 dma_free_coherent(&pdev->dev, TX_BUF_L, stm32port->tx_buf,
1677 stm32port->tx_dma_buf);
1678 }
1679
1680 static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port,
1681 struct platform_device *pdev)
1682 {
1683 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
1684 struct uart_port *port = &stm32port->port;
1685 struct device *dev = &pdev->dev;
1686 struct dma_slave_config config;
1687 int ret;
1688
1689 stm32port->tx_buf = dma_alloc_coherent(dev, TX_BUF_L,
1690 &stm32port->tx_dma_buf,
1691 GFP_KERNEL);
1692 if (!stm32port->tx_buf)
1693 return -ENOMEM;
1694
1695 /* Configure DMA channel */
1696 memset(&config, 0, sizeof(config));
1697 config.dst_addr = port->mapbase + ofs->tdr;
1698 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1699
1700 ret = dmaengine_slave_config(stm32port->tx_ch, &config);
1701 if (ret < 0) {
1702 dev_err(dev, "tx dma channel config failed\n");
1703 stm32_usart_of_dma_tx_remove(stm32port, pdev);
1704 return ret;
1705 }
1706
1707 return 0;
1708 }
1709
1710 static int stm32_usart_serial_probe(struct platform_device *pdev)
1711 {
1712 struct stm32_port *stm32port;
1713 int ret;
1714
1715 stm32port = stm32_usart_of_get_port(pdev);
1716 if (!stm32port)
1717 return -ENODEV;
1718
1719 stm32port->info = of_device_get_match_data(&pdev->dev);
1720 if (!stm32port->info)
1721 return -EINVAL;
1722
1723 stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx");
1724 if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER)
1725 return -EPROBE_DEFER;
1726
1727 /* Fall back to interrupt mode for any non-deferral error */
1728 if (IS_ERR(stm32port->rx_ch))
1729 stm32port->rx_ch = NULL;
1730
1731 stm32port->tx_ch = dma_request_chan(&pdev->dev, "tx");
1732 if (PTR_ERR(stm32port->tx_ch) == -EPROBE_DEFER) {
1733 ret = -EPROBE_DEFER;
1734 goto err_dma_rx;
1735 }
1736 /* Fall back in interrupt mode for any non-deferral error */
1737 if (IS_ERR(stm32port->tx_ch))
1738 stm32port->tx_ch = NULL;
1739
1740 ret = stm32_usart_init_port(stm32port, pdev);
1741 if (ret)
1742 goto err_dma_tx;
1743
1744 if (stm32port->wakeup_src) {
1745 device_set_wakeup_capable(&pdev->dev, true);
1746 ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq);
1747 if (ret)
1748 goto err_deinit_port;
1749 }
1750
1751 if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) {
1752 /* Fall back in interrupt mode */
1753 dma_release_channel(stm32port->rx_ch);
1754 stm32port->rx_ch = NULL;
1755 }
1756
1757 if (stm32port->tx_ch && stm32_usart_of_dma_tx_probe(stm32port, pdev)) {
1758 /* Fall back in interrupt mode */
1759 dma_release_channel(stm32port->tx_ch);
1760 stm32port->tx_ch = NULL;
1761 }
1762
1763 if (!stm32port->rx_ch)
1764 dev_info(&pdev->dev, "interrupt mode for rx (no dma)\n");
1765 if (!stm32port->tx_ch)
1766 dev_info(&pdev->dev, "interrupt mode for tx (no dma)\n");
1767
1768 platform_set_drvdata(pdev, &stm32port->port);
1769
1770 pm_runtime_get_noresume(&pdev->dev);
1771 pm_runtime_set_active(&pdev->dev);
1772 pm_runtime_enable(&pdev->dev);
1773
1774 ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
1775 if (ret)
1776 goto err_port;
1777
1778 pm_runtime_put_sync(&pdev->dev);
1779
1780 return 0;
1781
1782 err_port:
1783 pm_runtime_disable(&pdev->dev);
1784 pm_runtime_set_suspended(&pdev->dev);
1785 pm_runtime_put_noidle(&pdev->dev);
1786
1787 if (stm32port->tx_ch)
1788 stm32_usart_of_dma_tx_remove(stm32port, pdev);
1789 if (stm32port->rx_ch)
1790 stm32_usart_of_dma_rx_remove(stm32port, pdev);
1791
1792 if (stm32port->wakeup_src)
1793 dev_pm_clear_wake_irq(&pdev->dev);
1794
1795 err_deinit_port:
1796 if (stm32port->wakeup_src)
1797 device_set_wakeup_capable(&pdev->dev, false);
1798
1799 stm32_usart_deinit_port(stm32port);
1800
1801 err_dma_tx:
1802 if (stm32port->tx_ch)
1803 dma_release_channel(stm32port->tx_ch);
1804
1805 err_dma_rx:
1806 if (stm32port->rx_ch)
1807 dma_release_channel(stm32port->rx_ch);
1808
1809 return ret;
1810 }
1811
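/*
 * Remove: unregister the port, tear down the DMA buffers and channels,
 * mask the error/DMA enable bits in CR1/CR3 and release any wake-up
 * resources before de-initializing the port.
 */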
static int stm32_usart_serial_remove(struct platform_device *pdev)
{
	struct uart_port *port = platform_get_drvdata(pdev);
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	u32 cr3;

	pm_runtime_get_sync(&pdev->dev);
	uart_remove_one_port(&stm32_usart_driver, port);

	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE);

	if (stm32_port->tx_ch) {
		stm32_usart_of_dma_tx_remove(stm32_port, pdev);
		dma_release_channel(stm32_port->tx_ch);
	}

	if (stm32_port->rx_ch) {
		stm32_usart_of_dma_rx_remove(stm32_port, pdev);
		dma_release_channel(stm32_port->rx_ch);
	}

	cr3 = readl_relaxed(port->membase + ofs->cr3);
	cr3 &= ~USART_CR3_EIE;
	cr3 &= ~USART_CR3_DMAR;
	cr3 &= ~USART_CR3_DMAT;
	cr3 &= ~USART_CR3_DDRE;
	writel_relaxed(cr3, port->membase + ofs->cr3);

	if (stm32_port->wakeup_src) {
		dev_pm_clear_wake_irq(&pdev->dev);
		device_init_wakeup(&pdev->dev, false);
	}

	stm32_usart_deinit_port(stm32_port);

	return 0;
}

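/*
 * Poll for the TX data register to become empty (TXE), then push one
 * console character into TDR; gives up with an error after the timeout.
 */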
static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	u32 isr;
	int ret;

	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, isr,
						(isr & USART_SR_TXE), 100,
						STM32_USART_TIMEOUT_USEC);
	if (ret != 0) {
		dev_err(port->dev, "Error while sending data in UART TX: %d\n", ret);
		return;
	}
	writel_relaxed(ch, port->membase + ofs->tdr);
}

#ifdef CONFIG_SERIAL_STM32_CONSOLE
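/*
 * Console write: mask the USART interrupt enables and force the transmitter
 * on while the string is emitted, then restore the previous CR1 state. The
 * port lock is only try-acquired when an oops is in progress.
 */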
static void stm32_usart_console_write(struct console *co, const char *s,
				      unsigned int cnt)
{
	struct uart_port *port = &stm32_ports[co->index].port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	unsigned long flags;
	u32 old_cr1, new_cr1;
	int locked = 1;

	if (oops_in_progress)
		locked = spin_trylock_irqsave(&port->lock, flags);
	else
		spin_lock_irqsave(&port->lock, flags);

	/* Save and disable interrupts, enable the transmitter */
	old_cr1 = readl_relaxed(port->membase + ofs->cr1);
	new_cr1 = old_cr1 & ~USART_CR1_IE_MASK;
	new_cr1 |= USART_CR1_TE | BIT(cfg->uart_enable_bit);
	writel_relaxed(new_cr1, port->membase + ofs->cr1);

	uart_console_write(port, s, cnt, stm32_usart_console_putchar);

	/* Restore interrupt state */
	writel_relaxed(old_cr1, port->membase + ofs->cr1);

	if (locked)
		spin_unlock_irqrestore(&port->lock, flags);
}

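/*
 * Console setup: apply "console=" options when provided, otherwise fall
 * back to 9600 8N1 without flow control.
 */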
static int stm32_usart_console_setup(struct console *co, char *options)
{
	struct stm32_port *stm32port;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	if (co->index >= STM32_MAX_PORTS)
		return -ENODEV;

	stm32port = &stm32_ports[co->index];

	/*
	 * This driver does not support early console initialization
	 * (use ARM early printk support instead), so we only expect
	 * this to be called during the uart port registration when the
	 * driver gets probed and the port should be mapped at that point.
	 */
	if (stm32port->port.mapbase == 0 || !stm32port->port.membase)
		return -ENXIO;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(&stm32port->port, co, baud, parity, bits, flow);
}

static struct console stm32_console = {
	.name		= STM32_SERIAL_NAME,
	.device		= uart_console_device,
	.write		= stm32_usart_console_write,
	.setup		= stm32_usart_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &stm32_usart_driver,
};

#define STM32_SERIAL_CONSOLE (&stm32_console)

#else
#define STM32_SERIAL_CONSOLE NULL
#endif /* CONFIG_SERIAL_STM32_CONSOLE */

#ifdef CONFIG_SERIAL_EARLYCON
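/*
 * Early console helpers: busy-wait on TXE and write each character
 * directly, using the register layout stashed in port->private_data by
 * the per-compatible setup handlers below.
 */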
static void early_stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
{
	struct stm32_usart_info *info = port->private_data;

	while (!(readl_relaxed(port->membase + info->ofs.isr) & USART_SR_TXE))
		cpu_relax();

	writel_relaxed(ch, port->membase + info->ofs.tdr);
}

static void early_stm32_serial_write(struct console *console, const char *s, unsigned int count)
{
	struct earlycon_device *device = console->data;
	struct uart_port *port = &device->port;

	uart_console_write(port, s, count, early_stm32_usart_console_putchar);
}

static int __init early_stm32_h7_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32h7_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}

static int __init early_stm32_f7_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32f7_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}

static int __init early_stm32_f4_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32f4_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}

OF_EARLYCON_DECLARE(stm32, "st,stm32h7-uart", early_stm32_h7_serial_setup);
OF_EARLYCON_DECLARE(stm32, "st,stm32f7-uart", early_stm32_f7_serial_setup);
OF_EARLYCON_DECLARE(stm32, "st,stm32-uart", early_stm32_f4_serial_setup);
#endif /* CONFIG_SERIAL_EARLYCON */

static struct uart_driver stm32_usart_driver = {
	.driver_name	= DRIVER_NAME,
	.dev_name	= STM32_SERIAL_NAME,
	.major		= 0,
	.minor		= 0,
	.nr		= STM32_MAX_PORTS,
	.cons		= STM32_SERIAL_CONSOLE,
};

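/*
 * Enable or disable wake-up from low-power modes on the UART. Only acts
 * when the port has a wake-up source and the tty port is initialized.
 */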
static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
							bool enable)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct tty_port *tport = &port->state->port;
	int ret;
	unsigned int size = 0;
	unsigned long flags;

	if (!stm32_port->wakeup_src || !tty_port_initialized(tport))
		return 0;

	/*
	 * Enable low-power wake-up and wake-up irq if argument is set to
	 * "enable", disable low-power wake-up and wake-up irq otherwise.
	 */
	if (enable) {
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
		mctrl_gpio_enable_irq_wake(stm32_port->gpios);

		/*
		 * When DMA is used for reception, it must be disabled before
		 * entering low-power mode and re-enabled when exiting from
		 * low-power mode.
		 */
		if (stm32_port->rx_ch) {
			spin_lock_irqsave(&port->lock, flags);
			/* Poll data from DMA RX buffer if any */
			if (!stm32_usart_rx_dma_pause(stm32_port))
				size += stm32_usart_receive_chars(port, true);
			stm32_usart_rx_dma_terminate(stm32_port);
			uart_unlock_and_check_sysrq_irqrestore(port, flags);
			if (size)
				tty_flip_buffer_push(tport);
		}

		/* Poll data from RX FIFO if any */
		stm32_usart_receive_chars(port, false);
	} else {
		if (stm32_port->rx_ch) {
			ret = stm32_usart_rx_dma_start_or_resume(port);
			if (ret)
				return ret;
		}
		mctrl_gpio_disable_irq_wake(stm32_port->gpios);
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
	}

	return 0;
}

static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	int ret;

	uart_suspend_port(&stm32_usart_driver, port);

	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
		ret = stm32_usart_serial_en_wakeup(port, true);
		if (ret)
			return ret;
	}

	/*
	 * When "no_console_suspend" is enabled, keep the pinctrl default
	 * state and rely on the bootloader stage to restore this state upon
	 * resume. Otherwise, apply the idle or sleep states depending on
	 * wakeup capabilities.
	 */
	if (console_suspend_enabled || !uart_console(port)) {
		if (device_may_wakeup(dev) || device_wakeup_path(dev))
			pinctrl_pm_select_idle_state(dev);
		else
			pinctrl_pm_select_sleep_state(dev);
	}

	return 0;
}

static int __maybe_unused stm32_usart_serial_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	int ret;

	pinctrl_pm_select_default_state(dev);

	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
		ret = stm32_usart_serial_en_wakeup(port, false);
		if (ret)
			return ret;
	}

	return uart_resume_port(&stm32_usart_driver, port);
}

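/*
 * Runtime PM: gate the USART kernel clock while the port is idle and
 * re-enable it on resume.
 */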
static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = container_of(port,
			struct stm32_port, port);

	clk_disable_unprepare(stm32port->clk);

	return 0;
}

static int __maybe_unused stm32_usart_runtime_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = container_of(port,
			struct stm32_port, port);

	return clk_prepare_enable(stm32port->clk);
}

static const struct dev_pm_ops stm32_serial_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend,
			   stm32_usart_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend,
				stm32_usart_serial_resume)
};

static struct platform_driver stm32_serial_driver = {
	.probe		= stm32_usart_serial_probe,
	.remove		= stm32_usart_serial_remove,
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &stm32_serial_pm_ops,
		.of_match_table = of_match_ptr(stm32_match),
	},
};

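/*
 * Module init/exit: register the uart_driver first, then the platform
 * driver; unwind in the opposite order on failure or removal.
 */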
static int __init stm32_usart_init(void)
{
	static char banner[] __initdata = "STM32 USART driver initialized";
	int ret;

	pr_info("%s\n", banner);

	ret = uart_register_driver(&stm32_usart_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&stm32_serial_driver);
	if (ret)
		uart_unregister_driver(&stm32_usart_driver);

	return ret;
}

static void __exit stm32_usart_exit(void)
{
	platform_driver_unregister(&stm32_serial_driver);
	uart_unregister_driver(&stm32_usart_driver);
}

module_init(stm32_usart_init);
module_exit(stm32_usart_exit);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver");
MODULE_LICENSE("GPL v2");