// SPDX-License-Identifier: GPL-2.0+
/*
 * 8250_dma.c - DMA Engine API support for 8250.c
 *
 * Copyright (C) 2013 Intel Corporation
 */
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_reg.h>
#include <linux/dma-mapping.h>

#include "8250.h"

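/*
 * TX DMA completion callback: reclaim the region of the circular buffer
 * that has just been sent, wake up writers once fewer than WAKEUP_CHARS
 * bytes remain, and try to start the next TX DMA transfer. If that
 * fails, fall back to the THRI interrupt driven TX path.
 */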
static void __dma_tx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	struct circ_buf *xmit = &p->port.state->xmit;
	unsigned long flags;
	int ret;

	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
				UART_XMIT_SIZE, DMA_TO_DEVICE);

	spin_lock_irqsave(&p->port.lock, flags);

	dma->tx_running = 0;

	xmit->tail += dma->tx_size;
	xmit->tail &= UART_XMIT_SIZE - 1;
	p->port.icount.tx += dma->tx_size;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&p->port);

	ret = serial8250_tx_dma(p);
	if (ret)
		serial8250_set_THRI(p);

	spin_unlock_irqrestore(&p->port.lock, flags);
}

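/*
 * Push the bytes received by a finished (or paused) RX DMA transfer into
 * the tty flip buffer; the byte count is derived from the dmaengine
 * residue.
 */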
static void __dma_rx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	struct tty_port *tty_port = &p->port.state->port;
	struct dma_tx_state state;
	enum dma_status dma_status;
	int count;

	/*
	 * A new RX DMA transfer may have been started during this completion
	 * handler, before it could take the port lock, and that transfer may
	 * still be ongoing. Don't do anything in that case.
	 */
	dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
	if (dma_status == DMA_IN_PROGRESS)
		return;

	count = dma->rx_size - state.residue;

	tty_insert_flip_string(tty_port, dma->rx_buf, count);
	p->port.icount.rx += count;
	dma->rx_running = 0;

	tty_flip_buffer_push(tty_port);
}

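/*
 * dmaengine completion callback: finish the RX transfer under the port
 * lock, unless it has already been torn down.
 */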
static void dma_rx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	unsigned long flags;

	spin_lock_irqsave(&p->port.lock, flags);
	if (dma->rx_running)
		__dma_rx_complete(p);
	spin_unlock_irqrestore(&p->port.lock, flags);
}

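/*
 * Start a TX DMA transfer covering the pending data up to the end of the
 * circular buffer; any wrapped remainder is picked up by the next transfer,
 * started from __dma_tx_complete(). A pending x_char is written out through
 * the TX register, pausing an in-flight transfer when necessary.
 */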
int serial8250_tx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	struct circ_buf *xmit = &p->port.state->xmit;
	struct dma_async_tx_descriptor *desc;
	struct uart_port *up = &p->port;
	int ret;

	if (dma->tx_running) {
		if (up->x_char) {
			dmaengine_pause(dma->txchan);
			uart_xchar_out(up, UART_TX);
			dmaengine_resume(dma->txchan);
		}
		return 0;
	} else if (up->x_char) {
		uart_xchar_out(up, UART_TX);
	}

	if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
		/* We have been called from __dma_tx_complete() */
		serial8250_rpm_put_tx(p);
		return 0;
	}

	dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);

	desc = dmaengine_prep_slave_single(dma->txchan,
					   dma->tx_addr + xmit->tail,
					   dma->tx_size, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EBUSY;
		goto err;
	}

	dma->tx_running = 1;
	desc->callback = __dma_tx_complete;
	desc->callback_param = p;

	dma->tx_cookie = dmaengine_submit(desc);

	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);

	dma_async_issue_pending(dma->txchan);
	if (dma->tx_err) {
		dma->tx_err = 0;
		serial8250_clear_THRI(p);
	}
	return 0;
err:
	dma->tx_err = 1;
	return ret;
}

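/*
 * Start an RX DMA transfer into the RX buffer, unless one is already
 * running. Completion is signalled via dma_rx_complete().
 */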
int serial8250_rx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	struct dma_async_tx_descriptor *desc;

	if (dma->rx_running)
		return 0;

	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
					   dma->rx_size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	dma->rx_running = 1;
	desc->callback = dma_rx_complete;
	desc->callback_param = p;

	dma->rx_cookie = dmaengine_submit(desc);

	dma_async_issue_pending(dma->rxchan);

	return 0;
}

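/*
 * Force completion of an in-flight RX DMA transfer: pause the channel,
 * push the bytes received so far to the tty layer and tear the transfer
 * down.
 */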
void serial8250_rx_dma_flush(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (dma->rx_running) {
		dmaengine_pause(dma->rxchan);
		__dma_rx_complete(p);
		dmaengine_terminate_async(dma->rxchan);
	}
}
EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);

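/*
 * Acquire and configure the RX and TX DMA channels for a port, allocate a
 * coherent RX buffer (one page by default) and map the TX circular buffer
 * for streaming DMA. Fails if the dmaengine driver lacks the pause,
 * terminate or residue-reporting capabilities that the 8250 DMA paths
 * rely on.
 */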
int serial8250_request_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	phys_addr_t rx_dma_addr = dma->rx_dma_addr ?
				  dma->rx_dma_addr : p->port.mapbase;
	phys_addr_t tx_dma_addr = dma->tx_dma_addr ?
				  dma->tx_dma_addr : p->port.mapbase;
	dma_cap_mask_t mask;
	struct dma_slave_caps caps;
	int ret;

	/* Default slave configuration parameters */
	dma->rxconf.direction = DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rxconf.src_addr = rx_dma_addr + UART_RX;

	dma->txconf.direction = DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.dst_addr = tx_dma_addr + UART_TX;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get a channel for RX */
	dma->rxchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->rx_param,
						       p->port.dev, "rx");
	if (!dma->rxchan)
		return -ENODEV;

	/* 8250 rx dma requires dmaengine driver to support pause/terminate */
	ret = dma_get_slave_caps(dma->rxchan, &caps);
	if (ret)
		goto release_rx;
	if (!caps.cmd_pause || !caps.cmd_terminate ||
	    caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
		ret = -EINVAL;
		goto release_rx;
	}

	dmaengine_slave_config(dma->rxchan, &dma->rxconf);

	/* Get a channel for TX */
	dma->txchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->tx_param,
						       p->port.dev, "tx");
	if (!dma->txchan) {
		ret = -ENODEV;
		goto release_rx;
	}

	/* 8250 tx dma requires dmaengine driver to support terminate */
	ret = dma_get_slave_caps(dma->txchan, &caps);
	if (ret)
		goto err;
	if (!caps.cmd_terminate) {
		ret = -EINVAL;
		goto err;
	}

	dmaengine_slave_config(dma->txchan, &dma->txconf);

	/* RX buffer */
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE;

	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
					 &dma->rx_addr, GFP_KERNEL);
	if (!dma->rx_buf) {
		ret = -ENOMEM;
		goto err;
	}

	/* TX buffer */
	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
				      p->port.state->xmit.buf,
				      UART_XMIT_SIZE,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
		dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
				  dma->rx_buf, dma->rx_addr);
		ret = -ENOMEM;
		goto err;
	}

	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

	return 0;
err:
	dma_release_channel(dma->txchan);
release_rx:
	dma_release_channel(dma->rxchan);
	return ret;
}
EXPORT_SYMBOL_GPL(serial8250_request_dma);

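/*
 * Undo serial8250_request_dma(): terminate any in-flight transfers, free
 * the RX buffer, unmap the TX buffer and release both channels.
 */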
void serial8250_release_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (!dma)
		return;

	/* Release RX resources */
	dmaengine_terminate_sync(dma->rxchan);
	dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
			  dma->rx_addr);
	dma_release_channel(dma->rxchan);
	dma->rxchan = NULL;

	/* Release TX resources */
	dmaengine_terminate_sync(dma->txchan);
	dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
			 UART_XMIT_SIZE, DMA_TO_DEVICE);
	dma_release_channel(dma->txchan);
	dma->txchan = NULL;
	dma->tx_running = 0;

	dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
}
EXPORT_SYMBOL_GPL(serial8250_release_dma);