
Lines Matching +full:fu540 +full:- +full:c000 +full:- +full:v1

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * SiFive FU540 Platform DMA driver
7 * - drivers/dma/fsl-edma.c
8 * - drivers/dma/dw-edma/
9 * - drivers/dma/pxa-dma.c
12 * - Chapter 12 "Platform DMA Engine (PDMA)" of
13 * SiFive FU540-C000 v1.0
14 * https://static.dev.sifive.com/FU540-C000-v1.0.pdf
21 #include <linux/dma-mapping.h>
25 #include "sf-pdma.h"
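
For orientation, the per-channel register file this driver programs is described in Chapter 12 of the manual linked above. The constants below are an illustrative assumption based on that register map, not a copy of the driver's own definitions (those live in sf-pdma.h):

/* Illustrative only: assumed per-channel PDMA register offsets, after the
 * FU540-C000 manual, Chapter 12.  The driver's authoritative definitions
 * are in sf-pdma.h. */
#define EX_PDMA_CHAN_STRIDE	0x1000	/* channels assumed 4 KiB apart       */
#define EX_PDMA_CTRL		0x000	/* claim / run / done / error control */
#define EX_PDMA_XFER_TYPE	0x004	/* next transfer configuration        */
#define EX_PDMA_XFER_SIZE	0x008	/* next transfer size in bytes        */
#define EX_PDMA_DST_ADDR	0x010	/* next destination address           */
#define EX_PDMA_SRC_ADDR	0x018	/* next source address                */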
60 desc->chan = chan; in sf_pdma_alloc_desc()
68 desc->xfer_type = PDMA_FULL_SPEED; in sf_pdma_fill_desc()
69 desc->xfer_size = size; in sf_pdma_fill_desc()
70 desc->dst_addr = dst; in sf_pdma_fill_desc()
71 desc->src_addr = src; in sf_pdma_fill_desc()
76 struct pdma_regs *regs = &chan->regs; in sf_pdma_disclaim_chan()
78 writel(PDMA_CLEAR_CTRL, regs->ctrl); in sf_pdma_disclaim_chan()
90 dev_err(chan->pdma->dma_dev.dev, in sf_pdma_prep_dma_memcpy()
99 desc->in_use = true; in sf_pdma_prep_dma_memcpy()
100 desc->dirn = DMA_MEM_TO_MEM; in sf_pdma_prep_dma_memcpy()
101 desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); in sf_pdma_prep_dma_memcpy()
103 spin_lock_irqsave(&chan->vchan.lock, iflags); in sf_pdma_prep_dma_memcpy()
105 spin_unlock_irqrestore(&chan->vchan.lock, iflags); in sf_pdma_prep_dma_memcpy()
107 return desc->async_tx; in sf_pdma_prep_dma_memcpy()
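
From a client's point of view, the memcpy path above is reached through the generic dmaengine API. A minimal sketch of such a client, assuming dst/src are already DMA-mapped addresses and using a simplistic polling wait:

#include <linux/dmaengine.h>
#include <linux/err.h>

/* Minimal dmaengine memcpy client sketch; dst/src are assumed to be
 * DMA-mapped and len a valid transfer size. */
static int example_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_chan_by_mask(&mask);	/* may hand back a PDMA channel */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(tx);		/* queue the descriptor         */
	dma_async_issue_pending(chan);		/* ends up in sf_pdma_issue_pending() */

	dma_sync_wait(chan, cookie);		/* simplistic: poll for completion */
	dma_release_channel(chan);

	return 0;
}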
115 memcpy(&chan->cfg, cfg, sizeof(*cfg)); in sf_pdma_slave_config()
123 struct pdma_regs *regs = &chan->regs; in sf_pdma_alloc_chan_resources()
126 writel(PDMA_CLAIM_MASK, regs->ctrl); in sf_pdma_alloc_chan_resources()
133 struct pdma_regs *regs = &chan->regs; in sf_pdma_disable_request()
135 writel(readl(regs->ctrl) & ~PDMA_RUN_MASK, regs->ctrl); in sf_pdma_disable_request()
144 spin_lock_irqsave(&chan->vchan.lock, flags); in sf_pdma_free_chan_resources()
146 kfree(chan->desc); in sf_pdma_free_chan_resources()
147 chan->desc = NULL; in sf_pdma_free_chan_resources()
148 vchan_get_all_descriptors(&chan->vchan, &head); in sf_pdma_free_chan_resources()
150 spin_unlock_irqrestore(&chan->vchan.lock, flags); in sf_pdma_free_chan_resources()
151 vchan_dma_desc_free_list(&chan->vchan, &head); in sf_pdma_free_chan_resources()
158 struct pdma_regs *regs = &chan->regs; in sf_pdma_desc_residue()
164 spin_lock_irqsave(&chan->vchan.lock, flags); in sf_pdma_desc_residue()
166 list_for_each_entry(vd, &chan->vchan.desc_submitted, node) in sf_pdma_desc_residue()
167 if (vd->tx.cookie == cookie) in sf_pdma_desc_residue()
168 tx = &vd->tx; in sf_pdma_desc_residue()
173 if (cookie == tx->chan->completed_cookie) in sf_pdma_desc_residue()
176 if (cookie == tx->cookie) { in sf_pdma_desc_residue()
177 residue = readq(regs->residue); in sf_pdma_desc_residue()
179 vd = vchan_find_desc(&chan->vchan, cookie); in sf_pdma_desc_residue()
184 residue = desc->xfer_size; in sf_pdma_desc_residue()
188 spin_unlock_irqrestore(&chan->vchan.lock, flags); in sf_pdma_desc_residue()
214 spin_lock_irqsave(&chan->vchan.lock, flags); in sf_pdma_terminate_all()
216 kfree(chan->desc); in sf_pdma_terminate_all()
217 chan->desc = NULL; in sf_pdma_terminate_all()
218 chan->xfer_err = false; in sf_pdma_terminate_all()
219 vchan_get_all_descriptors(&chan->vchan, &head); in sf_pdma_terminate_all()
220 spin_unlock_irqrestore(&chan->vchan.lock, flags); in sf_pdma_terminate_all()
221 vchan_dma_desc_free_list(&chan->vchan, &head); in sf_pdma_terminate_all()
228 struct pdma_regs *regs = &chan->regs; in sf_pdma_enable_request()
236 writel(v, regs->ctrl); in sf_pdma_enable_request()
241 struct virt_dma_chan *vchan = &chan->vchan; in sf_pdma_get_first_pending_desc()
244 if (list_empty(&vchan->desc_issued)) in sf_pdma_get_first_pending_desc()
247 vdesc = list_first_entry(&vchan->desc_issued, struct virt_dma_desc, node); in sf_pdma_get_first_pending_desc()
254 struct sf_pdma_desc *desc = chan->desc; in sf_pdma_xfer_desc()
255 struct pdma_regs *regs = &chan->regs; in sf_pdma_xfer_desc()
258 dev_err(chan->pdma->dma_dev.dev, "NULL desc.\n"); in sf_pdma_xfer_desc()
262 writel(desc->xfer_type, regs->xfer_type); in sf_pdma_xfer_desc()
263 writeq(desc->xfer_size, regs->xfer_size); in sf_pdma_xfer_desc()
264 writeq(desc->dst_addr, regs->dst_addr); in sf_pdma_xfer_desc()
265 writeq(desc->src_addr, regs->src_addr); in sf_pdma_xfer_desc()
267 chan->desc = desc; in sf_pdma_xfer_desc()
268 chan->status = DMA_IN_PROGRESS; in sf_pdma_xfer_desc()
277 spin_lock_irqsave(&chan->vchan.lock, flags); in sf_pdma_issue_pending()
279 if (!chan->desc && vchan_issue_pending(&chan->vchan)) { in sf_pdma_issue_pending()
281 chan->desc = sf_pdma_get_first_pending_desc(chan); in sf_pdma_issue_pending()
285 spin_unlock_irqrestore(&chan->vchan.lock, flags); in sf_pdma_issue_pending()
293 desc->in_use = false; in sf_pdma_free_desc()
301 spin_lock_irqsave(&chan->lock, flags); in sf_pdma_donebh_tasklet()
302 if (chan->xfer_err) { in sf_pdma_donebh_tasklet()
303 chan->retries = MAX_RETRY; in sf_pdma_donebh_tasklet()
304 chan->status = DMA_COMPLETE; in sf_pdma_donebh_tasklet()
305 chan->xfer_err = false; in sf_pdma_donebh_tasklet()
307 spin_unlock_irqrestore(&chan->lock, flags); in sf_pdma_donebh_tasklet()
309 spin_lock_irqsave(&chan->vchan.lock, flags); in sf_pdma_donebh_tasklet()
310 list_del(&chan->desc->vdesc.node); in sf_pdma_donebh_tasklet()
311 vchan_cookie_complete(&chan->desc->vdesc); in sf_pdma_donebh_tasklet()
313 chan->desc = sf_pdma_get_first_pending_desc(chan); in sf_pdma_donebh_tasklet()
314 if (chan->desc) in sf_pdma_donebh_tasklet()
317 spin_unlock_irqrestore(&chan->vchan.lock, flags); in sf_pdma_donebh_tasklet()
323 struct sf_pdma_desc *desc = chan->desc; in sf_pdma_errbh_tasklet()
326 spin_lock_irqsave(&chan->lock, flags); in sf_pdma_errbh_tasklet()
327 if (chan->retries <= 0) { in sf_pdma_errbh_tasklet()
329 spin_unlock_irqrestore(&chan->lock, flags); in sf_pdma_errbh_tasklet()
330 dmaengine_desc_get_callback_invoke(desc->async_tx, NULL); in sf_pdma_errbh_tasklet()
333 chan->retries--; in sf_pdma_errbh_tasklet()
334 chan->xfer_err = true; in sf_pdma_errbh_tasklet()
335 chan->status = DMA_ERROR; in sf_pdma_errbh_tasklet()
338 spin_unlock_irqrestore(&chan->lock, flags); in sf_pdma_errbh_tasklet()
345 struct pdma_regs *regs = &chan->regs; in sf_pdma_done_isr()
349 spin_lock_irqsave(&chan->vchan.lock, flags); in sf_pdma_done_isr()
350 writel((readl(regs->ctrl)) & ~PDMA_DONE_STATUS_MASK, regs->ctrl); in sf_pdma_done_isr()
351 residue = readq(regs->residue); in sf_pdma_done_isr()
354 tasklet_hi_schedule(&chan->done_tasklet); in sf_pdma_done_isr()
357 struct sf_pdma_desc *desc = chan->desc; in sf_pdma_done_isr()
359 desc->src_addr += desc->xfer_size - residue; in sf_pdma_done_isr()
360 desc->dst_addr += desc->xfer_size - residue; in sf_pdma_done_isr()
361 desc->xfer_size = residue; in sf_pdma_done_isr()
366 spin_unlock_irqrestore(&chan->vchan.lock, flags); in sf_pdma_done_isr()
374 struct pdma_regs *regs = &chan->regs; in sf_pdma_err_isr()
377 spin_lock_irqsave(&chan->lock, flags); in sf_pdma_err_isr()
378 writel((readl(regs->ctrl)) & ~PDMA_ERR_STATUS_MASK, regs->ctrl); in sf_pdma_err_isr()
379 spin_unlock_irqrestore(&chan->lock, flags); in sf_pdma_err_isr()
381 tasklet_schedule(&chan->err_tasklet); in sf_pdma_err_isr()
387 * sf_pdma_irq_init() - Init PDMA IRQ Handlers
392 * make sure the pointers passed in are non-NULL. This function should be called
398 * * 0 - OK to init all IRQ handlers
399 * * -EINVAL - Failed to request an IRQ
406 for (i = 0; i < pdma->n_chans; i++) { in sf_pdma_irq_init()
407 chan = &pdma->chans[i]; in sf_pdma_irq_init()
411 dev_err(&pdev->dev, "ch(%d) Can't get done irq.\n", i); in sf_pdma_irq_init()
412 return -EINVAL; in sf_pdma_irq_init()
415 r = devm_request_irq(&pdev->dev, irq, sf_pdma_done_isr, 0, in sf_pdma_irq_init()
416 dev_name(&pdev->dev), (void *)chan); in sf_pdma_irq_init()
418 dev_err(&pdev->dev, "Fail to attach done ISR: %d\n", r); in sf_pdma_irq_init()
419 return -EINVAL; in sf_pdma_irq_init()
422 chan->txirq = irq; in sf_pdma_irq_init()
426 dev_err(&pdev->dev, "ch(%d) Can't get err irq.\n", i); in sf_pdma_irq_init()
427 return -EINVAL; in sf_pdma_irq_init()
430 r = devm_request_irq(&pdev->dev, irq, sf_pdma_err_isr, 0, in sf_pdma_irq_init()
431 dev_name(&pdev->dev), (void *)chan); in sf_pdma_irq_init()
433 dev_err(&pdev->dev, "Fail to attach err ISR: %d\n", r); in sf_pdma_irq_init()
434 return -EINVAL; in sf_pdma_irq_init()
437 chan->errirq = irq; in sf_pdma_irq_init()
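
The irq value requested above comes from the platform device in the elided lines. A minimal sketch of that lookup, assuming (as the fragments suggest) one done/err interrupt pair per channel; the helper name and the even/odd index layout are illustrative assumptions:

#include <linux/platform_device.h>

/* Hypothetical helper: look up the done/err interrupt pair for channel i.
 * The even/odd index layout is assumed for illustration; the driver's own
 * lookup sits in the elided lines above. */
static int example_get_chan_irqs(struct platform_device *pdev, int i,
				 int *done_irq, int *err_irq)
{
	*done_irq = platform_get_irq(pdev, i * 2);
	if (*done_irq < 0)
		return *done_irq;

	*err_irq = platform_get_irq(pdev, i * 2 + 1);
	if (*err_irq < 0)
		return *err_irq;

	return 0;
}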
444 * sf_pdma_setup_chans() - Init settings of each channel
448 * the pointers passed in are non-NULL. This function should be called only
460 INIT_LIST_HEAD(&pdma->dma_dev.channels); in sf_pdma_setup_chans()
462 for (i = 0; i < pdma->n_chans; i++) { in sf_pdma_setup_chans()
463 chan = &pdma->chans[i]; in sf_pdma_setup_chans()
465 chan->regs.ctrl = in sf_pdma_setup_chans()
467 chan->regs.xfer_type = in sf_pdma_setup_chans()
469 chan->regs.xfer_size = in sf_pdma_setup_chans()
471 chan->regs.dst_addr = in sf_pdma_setup_chans()
473 chan->regs.src_addr = in sf_pdma_setup_chans()
475 chan->regs.act_type = in sf_pdma_setup_chans()
477 chan->regs.residue = in sf_pdma_setup_chans()
479 chan->regs.cur_dst_addr = in sf_pdma_setup_chans()
481 chan->regs.cur_src_addr = in sf_pdma_setup_chans()
484 chan->pdma = pdma; in sf_pdma_setup_chans()
485 chan->pm_state = RUNNING; in sf_pdma_setup_chans()
486 chan->slave_id = i; in sf_pdma_setup_chans()
487 chan->xfer_err = false; in sf_pdma_setup_chans()
488 spin_lock_init(&chan->lock); in sf_pdma_setup_chans()
490 chan->vchan.desc_free = sf_pdma_free_desc; in sf_pdma_setup_chans()
491 vchan_init(&chan->vchan, &pdma->dma_dev); in sf_pdma_setup_chans()
493 writel(PDMA_CLEAR_CTRL, chan->regs.ctrl); in sf_pdma_setup_chans()
495 tasklet_setup(&chan->done_tasklet, sf_pdma_donebh_tasklet); in sf_pdma_setup_chans()
496 tasklet_setup(&chan->err_tasklet, sf_pdma_errbh_tasklet); in sf_pdma_setup_chans()
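
The chan->regs assignments above map each logical register onto the MMIO region ioremapped in probe, presumably as base + channel stride + register offset. A rough sketch with hypothetical names (the driver's real macros are defined in sf-pdma.h):

#include <linux/io.h>

/* Hypothetical sketch of the per-channel register address calculation.
 * The 0x1000 channel stride is an assumption taken from the FU540-C000
 * manual; the real constants live in sf-pdma.h. */
static void __iomem *example_pdma_ch_reg(void __iomem *membase,
					 unsigned int ch, unsigned int offset)
{
	return membase + ch * 0x1000 + offset;
}

/* e.g. chan->regs.ctrl = example_pdma_ch_reg(pdma->membase, i, EX_PDMA_CTRL); */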
515 pdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); in sf_pdma_probe()
517 return -ENOMEM; in sf_pdma_probe()
519 pdma->n_chans = chans; in sf_pdma_probe()
522 pdma->membase = devm_ioremap_resource(&pdev->dev, res); in sf_pdma_probe()
523 if (IS_ERR(pdma->membase)) in sf_pdma_probe()
524 return PTR_ERR(pdma->membase); in sf_pdma_probe()
532 pdma->dma_dev.dev = &pdev->dev; in sf_pdma_probe()
535 dma_cap_set(DMA_MEMCPY, pdma->dma_dev.cap_mask); in sf_pdma_probe()
536 pdma->dma_dev.copy_align = 2; in sf_pdma_probe()
537 pdma->dma_dev.src_addr_widths = widths; in sf_pdma_probe()
538 pdma->dma_dev.dst_addr_widths = widths; in sf_pdma_probe()
539 pdma->dma_dev.directions = BIT(DMA_MEM_TO_MEM); in sf_pdma_probe()
540 pdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; in sf_pdma_probe()
541 pdma->dma_dev.descriptor_reuse = true; in sf_pdma_probe()
544 pdma->dma_dev.device_alloc_chan_resources = in sf_pdma_probe()
546 pdma->dma_dev.device_free_chan_resources = in sf_pdma_probe()
548 pdma->dma_dev.device_tx_status = sf_pdma_tx_status; in sf_pdma_probe()
549 pdma->dma_dev.device_prep_dma_memcpy = sf_pdma_prep_dma_memcpy; in sf_pdma_probe()
550 pdma->dma_dev.device_config = sf_pdma_slave_config; in sf_pdma_probe()
551 pdma->dma_dev.device_terminate_all = sf_pdma_terminate_all; in sf_pdma_probe()
552 pdma->dma_dev.device_issue_pending = sf_pdma_issue_pending; in sf_pdma_probe()
556 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in sf_pdma_probe()
558 dev_warn(&pdev->dev, in sf_pdma_probe()
561 ret = dma_async_device_register(&pdma->dma_dev); in sf_pdma_probe()
563 dev_err(&pdev->dev, in sf_pdma_probe()
578 ch = &pdma->chans[i]; in sf_pdma_remove()
580 devm_free_irq(&pdev->dev, ch->txirq, ch); in sf_pdma_remove()
581 devm_free_irq(&pdev->dev, ch->errirq, ch); in sf_pdma_remove()
582 list_del(&ch->vchan.chan.device_node); in sf_pdma_remove()
583 tasklet_kill(&ch->vchan.task); in sf_pdma_remove()
584 tasklet_kill(&ch->done_tasklet); in sf_pdma_remove()
585 tasklet_kill(&ch->err_tasklet); in sf_pdma_remove()
588 dma_async_device_unregister(&pdma->dma_dev); in sf_pdma_remove()
594 { .compatible = "sifive,fu540-c000-pdma" },
603 .name = "sf-pdma",
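
The compatible string and driver name above are typically tied together with the standard platform-driver boilerplate. A sketch of how that registration presumably looks; the of_device_id table name is illustrative, while probe/remove and the strings follow the fragments above:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id sf_pdma_dt_ids[] = {
	{ .compatible = "sifive,fu540-c000-pdma" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);

static struct platform_driver sf_pdma_driver = {
	.probe		= sf_pdma_probe,
	.remove		= sf_pdma_remove,
	.driver		= {
		.name		= "sf-pdma",
		.of_match_table	= sf_pdma_dt_ids,
	},
};

module_platform_driver(sf_pdma_driver);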