// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA core driver
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/dma/edma.h>
#include <linux/dma-mapping.h>

#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

static inline
struct device *dchan2dev(struct dma_chan *dchan)
{
	return &dchan->dev->device;
}

static inline
struct device *chan2dev(struct dw_edma_chan *chan)
{
	return &chan->vc.chan.dev->device;
}

static inline
struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct dw_edma_desc, vd);
}

static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *burst;

	burst = kzalloc(sizeof(*burst), GFP_NOWAIT);
	if (unlikely(!burst))
		return NULL;

	INIT_LIST_HEAD(&burst->list);
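	/*
	 * The first burst allocated for a chunk becomes the list head below
	 * and is not counted in bursts_alloc; every later burst is appended
	 * to that head and counted.
	 */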
	if (chunk->burst) {
		/* Create and add new element into the linked list */
		chunk->bursts_alloc++;
		list_add_tail(&burst->list, &chunk->burst->list);
	} else {
		/* List head */
		chunk->bursts_alloc = 0;
		chunk->burst = burst;
	}

	return burst;
}

static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chan *chan = desc->chan;
	struct dw_edma *dw = chan->chip->dw;
	struct dw_edma_chunk *chunk;

	chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
	if (unlikely(!chunk))
		return NULL;

	INIT_LIST_HEAD(&chunk->list);
	chunk->chan = chan;
	/* The change bit (CB) is toggled on each chunk; this is the
	 * mechanism that informs the eDMA HW block that a new linked
	 * list is ready to be consumed.
	 *  - Odd chunks originate CB equal to 0
	 *  - Even chunks originate CB equal to 1
	 */
	chunk->cb = !(desc->chunks_alloc % 2);
	chunk->ll_region.paddr = dw->ll_region.paddr + chan->ll_off;
	chunk->ll_region.vaddr = dw->ll_region.vaddr + chan->ll_off;
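	/*
	 * Every chunk of a given channel maps onto the same per-channel
	 * slice of the shared linked-list region; the CB toggle above tells
	 * the HW when that slice has been refilled with a new list.
	 */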

	if (desc->chunk) {
		/* Create and add new element into the linked list */
		if (!dw_edma_alloc_burst(chunk)) {
			kfree(chunk);
			return NULL;
		}
		desc->chunks_alloc++;
		list_add_tail(&chunk->list, &desc->chunk->list);
	} else {
		/* List head */
		chunk->burst = NULL;
		desc->chunks_alloc = 0;
		desc->chunk = chunk;
	}

	return chunk;
}

static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (unlikely(!desc))
		return NULL;

	desc->chan = chan;
	if (!dw_edma_alloc_chunk(desc)) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static void dw_edma_free_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *child, *_next;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &chunk->burst->list, list) {
		list_del(&child->list);
		kfree(child);
		chunk->bursts_alloc--;
	}

	/* Remove the list head */
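	/*
	 * After the loop above, 'child' is the list_entry() of the list
	 * head itself, i.e. chunk->burst, so this releases the head element
	 * allocated by dw_edma_alloc_burst().
	 */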
	kfree(child);
	chunk->burst = NULL;
}

static void dw_edma_free_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chunk *child, *_next;

	if (!desc->chunk)
		return;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &desc->chunk->list, list) {
		dw_edma_free_burst(child);
		list_del(&child->list);
		kfree(child);
		desc->chunks_alloc--;
	}

	/* Remove the list head */
	kfree(child);
	desc->chunk = NULL;
}

static void dw_edma_free_desc(struct dw_edma_desc *desc)
{
	dw_edma_free_chunk(desc);
	kfree(desc);
}

static void vchan_free_desc(struct virt_dma_desc *vdesc)
{
	dw_edma_free_desc(vd2dw_edma_desc(vdesc));
}

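/*
 * Pop the next chunk of the descriptor at the head of the issued list and
 * hand it to the HW. Returns 1 if a chunk was started, 0 if there is
 * nothing left to transfer.
 */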
static int dw_edma_start_transfer(struct dw_edma_chan *chan)
{
	struct dw_edma_chunk *child;
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return 0;

	desc = vd2dw_edma_desc(vd);
	if (!desc)
		return 0;

	child = list_first_entry_or_null(&desc->chunk->list,
					 struct dw_edma_chunk, list);
	if (!child)
		return 0;

	dw_edma_v0_core_start(child, !desc->xfer_sz);
	desc->xfer_sz += child->ll_region.sz;
	dw_edma_free_burst(child);
	list_del(&child->list);
	kfree(child);
	desc->chunks_alloc--;

	return 1;
}

static int dw_edma_device_config(struct dma_chan *dchan,
				 struct dma_slave_config *config)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));
	chan->configured = true;

	return 0;
}

static int dw_edma_device_pause(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured)
		err = -EPERM;
	else if (chan->status != EDMA_ST_BUSY)
		err = -EPERM;
	else if (chan->request != EDMA_REQ_NONE)
		err = -EPERM;
	else
		chan->request = EDMA_REQ_PAUSE;

	return err;
}

static int dw_edma_device_resume(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		err = -EPERM;
	} else if (chan->status != EDMA_ST_PAUSE) {
		err = -EPERM;
	} else if (chan->request != EDMA_REQ_NONE) {
		err = -EPERM;
	} else {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}

	return err;
}

static int dw_edma_device_terminate_all(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;
	LIST_HEAD(head);

	if (!chan->configured) {
		/* Do nothing */
	} else if (chan->status == EDMA_ST_PAUSE) {
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->status == EDMA_ST_IDLE) {
		chan->configured = false;
	} else if (dw_edma_v0_core_ch_status(chan) == DMA_COMPLETE) {
		/*
		 * The channel is in a false BUSY state, probably because an
		 * interrupt was missed or lost
		 */
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->request > EDMA_REQ_PAUSE) {
		err = -EPERM;
	} else {
		chan->request = EDMA_REQ_STOP;
	}

	return err;
}

static void dw_edma_device_issue_pending(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	unsigned long flags;

	if (!chan->configured)
		return;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (vchan_issue_pending(&chan->vc) && chan->request == EDMA_REQ_NONE &&
	    chan->status == EDMA_ST_IDLE) {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static enum dma_status
dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
			 struct dma_tx_state *txstate)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE)
		ret = DMA_PAUSED;

	if (!txstate)
		goto ret_residue;

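	/*
	 * Residue is reported per descriptor: the total number of bytes
	 * queued (alloc_sz) minus the bytes already handed to the HW
	 * (xfer_sz).
	 */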
	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_find_desc(&chan->vc, cookie);
	if (vd) {
		desc = vd2dw_edma_desc(vd);
		if (desc)
			residue = desc->alloc_sz - desc->xfer_sz;
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);

ret_residue:
	dma_set_residue(txstate, residue);

	return ret;
}

static struct dma_async_tx_descriptor *
dw_edma_device_transfer(struct dw_edma_transfer *xfer)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
	enum dma_transfer_direction dir = xfer->direction;
	phys_addr_t src_addr, dst_addr;
	struct scatterlist *sg = NULL;
	struct dw_edma_chunk *chunk;
	struct dw_edma_burst *burst;
	struct dw_edma_desc *desc;
	u32 cnt;
	int i;

	if (!chan->configured)
		return NULL;

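	/*
	 * Validate the requested dmaengine direction against the HW channel
	 * direction. When chan->config.direction is DEV_TO_MEM/MEM_TO_DEV
	 * the eDMA is driven locally and the two directions must match
	 * (read channel for DEV_TO_MEM, write channel for MEM_TO_DEV); any
	 * other value means the eDMA is driven remotely across the link and
	 * the mapping is inverted.
	 */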
	switch (chan->config.direction) {
	case DMA_DEV_TO_MEM: /* local dma */
		if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ)
			break;
		return NULL;
	case DMA_MEM_TO_DEV: /* local dma */
		if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE)
			break;
		return NULL;
	default: /* remote dma */
		if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_READ)
			break;
		if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE)
			break;
		return NULL;
	}

	if (xfer->cyclic) {
		if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
			return NULL;
	} else {
		if (xfer->xfer.sg.len < 1)
			return NULL;
	}

	desc = dw_edma_alloc_desc(chan);
	if (unlikely(!desc))
		goto err_alloc;

	chunk = dw_edma_alloc_chunk(desc);
	if (unlikely(!chunk))
		goto err_alloc;

	src_addr = chan->config.src_addr;
	dst_addr = chan->config.dst_addr;

	if (xfer->cyclic) {
		cnt = xfer->xfer.cyclic.cnt;
	} else {
		cnt = xfer->xfer.sg.len;
		sg = xfer->xfer.sg.sgl;
	}

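	/*
	 * Build one burst per scatter-gather entry (or per cyclic period).
	 * A chunk holds at most chan->ll_max bursts, so a fresh chunk is
	 * allocated whenever the current one fills up.
	 */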
	for (i = 0; i < cnt; i++) {
		if (!xfer->cyclic && !sg)
			break;

		if (chunk->bursts_alloc == chan->ll_max) {
			chunk = dw_edma_alloc_chunk(desc);
			if (unlikely(!chunk))
				goto err_alloc;
		}

		burst = dw_edma_alloc_burst(chunk);
		if (unlikely(!burst))
			goto err_alloc;

		if (xfer->cyclic)
			burst->sz = xfer->xfer.cyclic.len;
		else
			burst->sz = sg_dma_len(sg);

		chunk->ll_region.sz += burst->sz;
		desc->alloc_sz += burst->sz;

		if (dir == DMA_DEV_TO_MEM) {
			burst->sar = src_addr;
			if (xfer->cyclic) {
				burst->dar = xfer->xfer.cyclic.paddr;
			} else {
				burst->dar = dst_addr;
				/* Unlike the typical assumption by other
				 * drivers/IPs, the peripheral memory isn't
				 * a FIFO memory; in this case it's linear
				 * memory, which is why the source and
				 * destination addresses are advanced by the
				 * same amount (the data length)
				 */
			}
		} else {
			burst->dar = dst_addr;
			if (xfer->cyclic) {
				burst->sar = xfer->xfer.cyclic.paddr;
			} else {
				burst->sar = src_addr;
				/* Unlike the typical assumption by other
				 * drivers/IPs, the peripheral memory isn't
				 * a FIFO memory; in this case it's linear
				 * memory, which is why the source and
				 * destination addresses are advanced by the
				 * same amount (the data length)
				 */
			}
		}

		if (!xfer->cyclic) {
			src_addr += sg_dma_len(sg);
			dst_addr += sg_dma_len(sg);
			sg = sg_next(sg);
		}
	}

	return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);

err_alloc:
	if (desc)
		dw_edma_free_desc(desc);

	return NULL;
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			     unsigned int len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.sg.sgl = sgl;
	xfer.xfer.sg.len = len;
	xfer.flags = flags;
	xfer.cyclic = false;

	return dw_edma_device_transfer(&xfer);
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
			       size_t len, size_t count,
			       enum dma_transfer_direction direction,
			       unsigned long flags)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.cyclic.paddr = paddr;
	xfer.xfer.cyclic.len = len;
	xfer.xfer.cyclic.cnt = count;
	xfer.flags = flags;
	xfer.cyclic = true;

	return dw_edma_device_transfer(&xfer);
}

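/*
 * DONE interrupt: the HW finished the chunk that was programmed last.
 * Depending on the pending request, either complete the descriptor, kick
 * the next chunk, or transition to the paused/idle state.
 */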
static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_done_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		switch (chan->request) {
		case EDMA_REQ_NONE:
			desc = vd2dw_edma_desc(vd);
			if (!desc->chunks_alloc) {
				list_del(&vd->node);
				vchan_cookie_complete(vd);
			}

			/* Continue transferring if there are remaining
			 * chunks or issued requests.
			 */
			chan->status = dw_edma_start_transfer(chan) ?
				       EDMA_ST_BUSY : EDMA_ST_IDLE;
			break;

		case EDMA_REQ_STOP:
			list_del(&vd->node);
			vchan_cookie_complete(vd);
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_IDLE;
			break;

		case EDMA_REQ_PAUSE:
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_PAUSE;
			break;

		default:
			break;
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_abort_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		list_del(&vd->node);
		vchan_cookie_complete(vd);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	chan->request = EDMA_REQ_NONE;
	chan->status = EDMA_ST_IDLE;
}

static irqreturn_t dw_edma_interrupt(int irq, void *data, bool write)
{
	struct dw_edma_irq *dw_irq = data;
	struct dw_edma *dw = dw_irq->dw;
	unsigned long total, pos, val;
	unsigned long off;
	u32 mask;

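	/*
	 * dw->chan[] holds the write channels first, followed by the read
	 * channels, hence the 'off' offset when servicing read interrupts.
	 */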
	if (write) {
		total = dw->wr_ch_cnt;
		off = 0;
		mask = dw_irq->wr_mask;
	} else {
		total = dw->rd_ch_cnt;
		off = dw->wr_ch_cnt;
		mask = dw_irq->rd_mask;
	}

	val = dw_edma_v0_core_status_done_int(dw, write ?
					      EDMA_DIR_WRITE :
					      EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_done_interrupt(chan);
	}

	val = dw_edma_v0_core_status_abort_int(dw, write ?
					       EDMA_DIR_WRITE :
					       EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_abort_interrupt(chan);
	}

	return IRQ_HANDLED;
}

static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, true);
}

static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, false);
}

static irqreturn_t dw_edma_interrupt_common(int irq, void *data)
{
	dw_edma_interrupt(irq, data, true);
	dw_edma_interrupt(irq, data, false);

	return IRQ_HANDLED;
}

static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	if (chan->status != EDMA_ST_IDLE)
		return -EBUSY;

	pm_runtime_get(chan->chip->dev);

	return 0;
}

static void dw_edma_free_chan_resources(struct dma_chan *dchan)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int ret;

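	/*
	 * dw_edma_device_terminate_all() refuses (-EPERM) while a
	 * previously requested stop is still being processed, so retry for
	 * up to 5 seconds before giving up.
	 */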
	while (time_before(jiffies, timeout)) {
		ret = dw_edma_device_terminate_all(dchan);
		if (!ret)
			break;

		if (time_after_eq(jiffies, timeout))
			return;

		cpu_relax();
	}

	pm_runtime_put(chan->chip->dev);
}

static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
				 u32 wr_alloc, u32 rd_alloc)
{
	struct dw_edma_region *dt_region;
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	struct dw_edma_chan *chan;
	size_t ll_chunk, dt_chunk;
	struct dw_edma_irq *irq;
	struct dma_device *dma;
	u32 i, j, cnt, ch_cnt;
	u32 alloc, off_alloc;
	int err = 0;
	u32 pos;

	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
	ll_chunk = dw->ll_region.sz;
	dt_chunk = dw->dt_region.sz;

	/* Calculate linked list chunk for each channel */
	ll_chunk /= roundup_pow_of_two(ch_cnt);

	/* Calculate data chunk for each channel */
	dt_chunk /= roundup_pow_of_two(ch_cnt);

	if (write) {
		i = 0;
		cnt = dw->wr_ch_cnt;
		dma = &dw->wr_edma;
		alloc = wr_alloc;
		off_alloc = 0;
	} else {
		i = dw->wr_ch_cnt;
		cnt = dw->rd_ch_cnt;
		dma = &dw->rd_edma;
		alloc = rd_alloc;
		off_alloc = wr_alloc;
	}

	INIT_LIST_HEAD(&dma->channels);
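	/*
	 * Channels are only registered if at least one IRQ vector was
	 * allocated for this direction, or if a single common IRQ is shared
	 * by all channels.
	 */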
	for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) {
		chan = &dw->chan[i];

		dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL);
		if (!dt_region)
			return -ENOMEM;

		chan->vc.chan.private = dt_region;

		chan->chip = chip;
		chan->id = j;
		chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
		chan->configured = false;
		chan->request = EDMA_REQ_NONE;
		chan->status = EDMA_ST_IDLE;

		chan->ll_off = (ll_chunk * i);
		chan->ll_max = (ll_chunk / EDMA_LL_SZ) - 1;

		chan->dt_off = (dt_chunk * i);

		dev_vdbg(dev, "L. List:\tChannel %s[%u] off=0x%.8lx, max_cnt=%u\n",
			 write ? "write" : "read", j,
			 chan->ll_off, chan->ll_max);

		if (dw->nr_irqs == 1)
			pos = 0;
		else
			pos = off_alloc + (j % alloc);

		irq = &dw->irq[pos];

		if (write)
			irq->wr_mask |= BIT(j);
		else
			irq->rd_mask |= BIT(j);

		irq->dw = dw;
		memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));

		dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
			 write ? "write" : "read", j,
			 chan->msi.address_hi, chan->msi.address_lo,
			 chan->msi.data);

		chan->vc.desc_free = vchan_free_desc;
		vchan_init(&chan->vc, dma);

		dt_region->paddr = dw->dt_region.paddr + chan->dt_off;
		dt_region->vaddr = dw->dt_region.vaddr + chan->dt_off;
		dt_region->sz = dt_chunk;

		dev_vdbg(dev, "Data:\tChannel %s[%u] off=0x%.8lx\n",
			 write ? "write" : "read", j, chan->dt_off);

		dw_edma_v0_core_device_config(chan);
	}

	/* Set DMA channel capabilities */
	dma_cap_zero(dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma->chancnt = cnt;

	/* Set DMA channel callbacks */
	dma->dev = chip->dev;
	dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
	dma->device_free_chan_resources = dw_edma_free_chan_resources;
	dma->device_config = dw_edma_device_config;
	dma->device_pause = dw_edma_device_pause;
	dma->device_resume = dw_edma_device_resume;
	dma->device_terminate_all = dw_edma_device_terminate_all;
	dma->device_issue_pending = dw_edma_device_issue_pending;
	dma->device_tx_status = dw_edma_device_tx_status;
	dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
	dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;

	dma_set_max_seg_size(dma->dev, U32_MAX);

	/* Register DMA device */
	err = dma_async_device_register(dma);

	return err;
}

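/*
 * Helpers for spreading the available IRQ vectors between the write and
 * read channels: dw_edma_dec_irq_alloc() hands out one more vector to a
 * direction while spare vectors remain, and dw_edma_add_irq_mask() works
 * out how many channels have to share each allocated vector
 * (ceil(cnt / alloc)).
 */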
static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt)
{
	if (*nr_irqs && *alloc < cnt) {
		(*alloc)++;
		(*nr_irqs)--;
	}
}

static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt)
{
	while (*mask * alloc < cnt)
		(*mask)++;
}

static int dw_edma_irq_request(struct dw_edma_chip *chip,
			       u32 *wr_alloc, u32 *rd_alloc)
{
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	u32 wr_mask = 1;
	u32 rd_mask = 1;
	int i, err = 0;
	u32 ch_cnt;
	int irq;

	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;

	if (dw->nr_irqs < 1)
		return -EINVAL;

	if (dw->nr_irqs == 1) {
		/* Common IRQ shared among all channels */
		irq = dw->ops->irq_vector(dev, 0);
		err = request_irq(irq, dw_edma_interrupt_common,
				  IRQF_SHARED, dw->name, &dw->irq[0]);
		if (err) {
			dw->nr_irqs = 0;
			return err;
		}

		if (irq_get_msi_desc(irq))
			get_cached_msi_msg(irq, &dw->irq[0].msi);
	} else {
		/* Distribute IRQs equally among all channels */
		int tmp = dw->nr_irqs;

		while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
			dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
			dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt);
		}

		dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt);
		dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);

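		/*
		 * The first *wr_alloc vectors service the write channels,
		 * the remaining *rd_alloc vectors service the read
		 * channels.
		 */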
		for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
			irq = dw->ops->irq_vector(dev, i);
			err = request_irq(irq,
					  i < *wr_alloc ?
						dw_edma_interrupt_write :
						dw_edma_interrupt_read,
					  IRQF_SHARED, dw->name,
					  &dw->irq[i]);
			if (err) {
				dw->nr_irqs = i;
				return err;
			}

			if (irq_get_msi_desc(irq))
				get_cached_msi_msg(irq, &dw->irq[i].msi);
		}

		dw->nr_irqs = i;
	}

	return err;
}

int dw_edma_probe(struct dw_edma_chip *chip)
{
	struct device *dev;
	struct dw_edma *dw;
	u32 wr_alloc = 0;
	u32 rd_alloc = 0;
	int i, err;

	if (!chip)
		return -EINVAL;

	dev = chip->dev;
	if (!dev)
		return -EINVAL;

	dw = chip->dw;
	if (!dw || !dw->irq || !dw->ops || !dw->ops->irq_vector)
		return -EINVAL;

	raw_spin_lock_init(&dw->lock);

	/* Find out how many write channels are supported by hardware */
	dw->wr_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE);
	if (!dw->wr_ch_cnt)
		return -EINVAL;

	/* Find out how many read channels are supported by hardware */
	dw->rd_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ);
	if (!dw->rd_ch_cnt)
		return -EINVAL;

	dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n",
		 dw->wr_ch_cnt, dw->rd_ch_cnt);

	/* Allocate channels */
	dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id);

	/* Disable the eDMA engine to establish a known initial state */
	dw_edma_v0_core_off(dw);

	/* Request IRQs */
	err = dw_edma_irq_request(chip, &wr_alloc, &rd_alloc);
	if (err)
		return err;

	/* Setup write channels */
	err = dw_edma_channel_setup(chip, true, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Setup read channels */
	err = dw_edma_channel_setup(chip, false, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Power management */
	pm_runtime_enable(dev);

	/* Turn debugfs on */
	dw_edma_v0_core_debugfs_on(chip);

	return 0;

err_irq_free:
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);

	dw->nr_irqs = 0;

	return err;
}
EXPORT_SYMBOL_GPL(dw_edma_probe);

int dw_edma_remove(struct dw_edma_chip *chip)
{
	struct dw_edma_chan *chan, *_chan;
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	int i;

	/* Disable eDMA */
	dw_edma_v0_core_off(dw);

	/* Free irqs */
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);

	/* Power management */
	pm_runtime_disable(dev);

	/* Deregister eDMA device */
	dma_async_device_unregister(&dw->wr_edma);
	list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
				 vc.chan.device_node) {
		tasklet_kill(&chan->vc.task);
		list_del(&chan->vc.chan.device_node);
	}

	dma_async_device_unregister(&dw->rd_edma);
	list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
				 vc.chan.device_node) {
		tasklet_kill(&chan->vc.task);
		list_del(&chan->vc.chan.device_node);
	}

	/* Turn debugfs off */
	dw_edma_v0_core_debugfs_off();

	return 0;
}
EXPORT_SYMBOL_GPL(dw_edma_remove);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver");
MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");
