// SPDX-License-Identifier: GPL-2.0+
/*
 * 8250_dma.c - DMA Engine API support for 8250.c
 *
 * Copyright (C) 2013 Intel Corporation
 */
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_reg.h>
#include <linux/dma-mapping.h>

#include "8250.h"

#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
#define MAX_TX_BYTES		64
#define MAX_FIFO_SIZE		64
#define UART_RFL_16550A		0x21
#endif

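/*
 * Completion callback for a TX transfer: account for the bytes that were
 * just sent, wake up writers once the circular buffer drops below
 * WAKEUP_CHARS, and either queue the next transfer or re-enable THRI so
 * the remaining data goes out via PIO.
 */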
static void _dma_tx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	struct circ_buf *xmit = &p->port.state->xmit;
	unsigned long flags;
	int ret;

	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
				UART_XMIT_SIZE, DMA_TO_DEVICE);

	spin_lock_irqsave(&p->port.lock, flags);

	dma->tx_running = 0;

	xmit->tail += dma->tx_size;
	xmit->tail &= UART_XMIT_SIZE - 1;
	p->port.icount.tx += dma->tx_size;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&p->port);

	ret = serial8250_tx_dma(p);
	if (ret)
		serial8250_set_THRI(p);

	spin_unlock_irqrestore(&p->port.lock, flags);
}

#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)

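/*
 * With the Rockchip cyclic RX setup there is no per-transfer callback;
 * this helper works out how far the DMA write position has advanced from
 * the descriptor residue, hands the new bytes to the tty layer (taking
 * care of ring-buffer wrap-around), and records the new read index.
 */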
static void _dma_rx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	struct tty_port *tty_port = &p->port.state->port;
	struct dma_tx_state state;
	unsigned int count = 0, cur_index = 0;

	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
	cur_index = dma->rx_size - state.residue;

	if (cur_index == dma->rx_index)
		return;
	else if (cur_index > dma->rx_index)
		count = cur_index - dma->rx_index;
	else
		count = dma->rx_size - dma->rx_index;

	tty_insert_flip_string(tty_port, dma->rx_buf + dma->rx_index, count);

	if (cur_index < dma->rx_index) {
		tty_insert_flip_string(tty_port, dma->rx_buf, cur_index);
		count += cur_index;
	}

	p->port.icount.rx += count;
	dma->rx_index = cur_index;
}

#else

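/*
 * Generic one-shot RX completion: the transfer has ended, so push
 * everything that was received (transfer size minus the reported
 * residue) to the tty layer in one go.
 */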
static void _dma_rx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	struct tty_port *tty_port = &p->port.state->port;
	struct dma_tx_state state;
	int count;

	dma->rx_running = 0;
	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);

	count = dma->rx_size - state.residue;

	tty_insert_flip_string(tty_port, dma->rx_buf, count);
	p->port.icount.rx += count;

	tty_flip_buffer_push(tty_port);
}

#endif

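/*
 * Queue a TX DMA transfer for the data currently in the circular buffer.
 * Returns 0 when the data was queued (or nothing needed to be sent) and a
 * negative errno when no transfer could be set up; tx_err is set in that
 * case so the caller can fall back to interrupt-driven PIO.
 */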
int serial8250_tx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	struct circ_buf *xmit = &p->port.state->xmit;
	struct dma_async_tx_descriptor *desc;
	struct uart_port *up = &p->port;
	int ret;

	if (dma->tx_running) {
		if (up->x_char) {
			dmaengine_pause(dma->txchan);
			uart_xchar_out(up, UART_TX);
			dmaengine_resume(dma->txchan);
		}
		return 0;
	} else if (up->x_char) {
		uart_xchar_out(up, UART_TX);
	}

	if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
		/* We have been called from _dma_tx_complete() */
		serial8250_rpm_put_tx(p);
		return 0;
	}

	dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
	if (dma->tx_size < MAX_TX_BYTES) {
		ret = -EBUSY;
		goto err;
	}
#endif
	desc = dmaengine_prep_slave_single(dma->txchan,
					   dma->tx_addr + xmit->tail,
					   dma->tx_size, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EBUSY;
		goto err;
	}

	dma->tx_running = 1;
	desc->callback = _dma_tx_complete;
	desc->callback_param = p;

	dma->tx_cookie = dmaengine_submit(desc);

	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);

	dma_async_issue_pending(dma->txchan);
	if (dma->tx_err) {
		dma->tx_err = 0;
		serial8250_clear_THRI(p);
	}
	return 0;
err:
	dma->tx_err = 1;
	return ret;
}

#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)

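/*
 * Rockchip RX path: reception runs as a free-running cyclic DMA transfer.
 * When called, wait for the DMA position to settle on a burst boundary,
 * drain whatever is still sitting in the UART FIFO by PIO (the fill level
 * comes from the RFL receive-FIFO-level register), then push both the
 * DMA'd data and the drained remainder to the tty layer before restoring
 * the port's configured FCR value.
 */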
int serial8250_rx_dma(struct uart_8250_port *p)
{
	unsigned int rfl, i = 0, fcr = 0, cur_index = 0;
	unsigned char buf[MAX_FIFO_SIZE];
	struct uart_port *port = &p->port;
	struct tty_port *tty_port = &p->port.state->port;
	struct dma_tx_state state;
	struct uart_8250_dma *dma = p->dma;

	fcr = UART_FCR_ENABLE_FIFO | UART_FCR_T_TRIG_10 | UART_FCR_R_TRIG_11;
	serial_port_out(port, UART_FCR, fcr);

	do {
		dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
		cur_index = dma->rx_size - state.residue;
	} while (cur_index % dma->rxconf.src_maxburst);

	rfl = serial_port_in(port, UART_RFL_16550A);
	while (i < rfl)
		buf[i++] = serial_port_in(port, UART_RX);

	_dma_rx_complete(p);

	tty_insert_flip_string(tty_port, buf, i);
	p->port.icount.rx += i;
	tty_flip_buffer_push(tty_port);

	if (fcr)
		serial_port_out(port, UART_FCR, p->fcr);
	return 0;
}

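/*
 * Arm the free-running cyclic RX descriptor. No completion callback is
 * installed; progress is tracked through the residue in _dma_rx_complete().
 */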
int serial8250_start_rx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(dma->rxchan, dma->rx_addr,
					 dma->rx_size, dma->rx_size,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	dma->rx_running = 1;
	desc->callback = NULL;
	desc->callback_param = NULL;

	dma->rx_cookie = dmaengine_submit(desc);
	dma_async_issue_pending(dma->rxchan);
	dma->rx_index = 0;
	return 0;
}

#else

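/*
 * Generic RX path: set up a single DEV_TO_MEM transfer of rx_size bytes
 * and let _dma_rx_complete() push the data when it finishes.
 */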
int serial8250_rx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	struct dma_async_tx_descriptor *desc;

	if (dma->rx_running)
		return 0;

	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
					   dma->rx_size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	dma->rx_running = 1;
	desc->callback = _dma_rx_complete;
	desc->callback_param = p;

	dma->rx_cookie = dmaengine_submit(desc);

	dma_async_issue_pending(dma->rxchan);

	return 0;
}

#endif

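/*
 * Hand any data the RX DMA has already gathered to the tty layer, then
 * tear the transfer down. Pausing the channel first keeps the residue
 * stable while _dma_rx_complete() reads it.
 */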
void serial8250_rx_dma_flush(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (dma->rx_running) {
		dmaengine_pause(dma->rxchan);
		_dma_rx_complete(p);
		dmaengine_terminate_async(dma->rxchan);
	}
}
EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);

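/*
 * Request and configure the RX and TX channels. RX is mandatory and the
 * dmaengine driver must support pause/terminate and report residue at
 * better than descriptor granularity; TX is treated as optional and the
 * port simply keeps using PIO for transmission if no usable TX channel
 * can be set up.
 */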
int serial8250_request_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	phys_addr_t rx_dma_addr = dma->rx_dma_addr ?
				  dma->rx_dma_addr : p->port.mapbase;
	phys_addr_t tx_dma_addr = dma->tx_dma_addr ?
				  dma->tx_dma_addr : p->port.mapbase;
	dma_cap_mask_t mask;
	struct dma_slave_caps caps;
	int ret;

	/* Default slave configuration parameters */
	dma->rxconf.direction = DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rxconf.src_addr = rx_dma_addr + UART_RX;
#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
	if (p->port.fifosize / 4 < 16)
		dma->rxconf.src_maxburst = p->port.fifosize / 4;
	else
		dma->rxconf.src_maxburst = 16;
#endif

	dma->txconf.direction = DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.dst_addr = tx_dma_addr + UART_TX;
#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
	dma->txconf.dst_maxburst = 16;
#endif
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get a channel for RX */
	dma->rxchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->rx_param,
						       p->port.dev, "rx");
	if (!dma->rxchan)
		return -ENODEV;

	/* 8250 rx dma requires dmaengine driver to support pause/terminate */
	ret = dma_get_slave_caps(dma->rxchan, &caps);
	if (ret)
		goto release_rx;
	if (!caps.cmd_pause || !caps.cmd_terminate ||
	    caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
		ret = -EINVAL;
		goto release_rx;
	}

	dmaengine_slave_config(dma->rxchan, &dma->rxconf);

	/* RX buffer */
#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE * 2;
#else
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE;
#endif

	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
					 &dma->rx_addr, GFP_KERNEL);
	if (!dma->rx_buf) {
		ret = -ENOMEM;
		goto release_rx;
	}

	/* Get a channel for TX */
	dma->txchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->tx_param,
						       p->port.dev, "tx");
	if (dma->txchan) {
		dmaengine_slave_config(dma->txchan, &dma->txconf);

		/* TX buffer */
		dma->tx_addr = dma_map_single(dma->txchan->device->dev,
					      p->port.state->xmit.buf,
					      UART_XMIT_SIZE,
					      DMA_TO_DEVICE);
		if (dma_mapping_error(dma->txchan->device->dev,
				      dma->tx_addr)) {
			dma_free_coherent(dma->rxchan->device->dev,
					  dma->rx_size, dma->rx_buf,
					  dma->rx_addr);
			dma_release_channel(dma->txchan);
			dma->txchan = NULL;
		}

		dev_info_ratelimited(p->port.dev, "got rx and tx dma channels\n");
	} else {
		dev_info_ratelimited(p->port.dev, "got rx dma channels only\n");
	}

#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
	/* start dma for rx */
	serial8250_start_rx_dma(p);
#endif
	return 0;
release_rx:
	dma_release_channel(dma->rxchan);
	return ret;
}
EXPORT_SYMBOL_GPL(serial8250_request_dma);

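/*
 * Undo serial8250_request_dma(): stop any in-flight transfers, free the
 * RX buffer, unmap the TX buffer and release both channels.
 */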
void serial8250_release_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (!dma)
		return;

	/* Release RX resources */
	dmaengine_terminate_sync(dma->rxchan);
	dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
			  dma->rx_addr);
	dma_release_channel(dma->rxchan);
	dma->rxchan = NULL;
#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
	dma->rx_running = 0;
#endif
	/* Release TX resources */
	if (dma->txchan) {
		dmaengine_terminate_all(dma->txchan);
		dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
				 UART_XMIT_SIZE, DMA_TO_DEVICE);
		dma_release_channel(dma->txchan);
		dma->txchan = NULL;
		dma->tx_running = 0;
	}
	dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
}
EXPORT_SYMBOL_GPL(serial8250_release_dma);