Lines Matching +full:memcpy +full:- +full:bus +full:- +full:width
1 // SPDX-License-Identifier: GPL-2.0-or-later
7 * based on amba-pl08x.c
10 * Copyright (c) 2010 ST-Ericsson SA
16 * that can be routed to any of the 4 to 8 hardware channels.
24 * - bursts
30 #include <linux/dma-mapping.h>
36 #include <linux/platform_data/dma-s3c24xx.h>
39 #include "virt-dma.h"
104 * SDI is valid on channels 0, 2 and 3 - with varying hw request sources.
116 * struct soc_data - vendor-specific config parameters for individual SoCs
119 * @has_clocks: are controllable dma-clocks present
128 * enum s3c24xx_dma_chan_state - holds the virtual channel states
133 * channel to become available (only pertains to memcpy channels)
142 * struct s3c24xx_sg - structure containing data per sg
156 * struct s3c24xx_txd - wrapper for struct dma_async_tx_descriptor
160 * @width: transfer width
170 u8 width; member
180 * struct s3c24xx_dma_phy - holder for the physical channels
202 * struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel
212 * @slave: whether this channel is a device (slave) or for memcpy
227 * struct s3c24xx_dma_engine - the local state holder for the S3C24XX
232 * @memcpy: memcpy engine for this instance
241 struct dma_device memcpy; member
254 unsigned int val = readl(phy->base + S3C24XX_DSTAT); in s3c24xx_dma_phy_busy()
261 struct s3c24xx_dma_engine *s3cdma = s3cchan->host; in s3c24xx_dma_phy_valid()
262 const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata; in s3c24xx_dma_phy_valid()
263 struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id]; in s3c24xx_dma_phy_valid()
267 if (!s3cchan->slave) in s3c24xx_dma_phy_valid()
271 if (s3cdma->sdata->has_reqsel) in s3c24xx_dma_phy_valid()
274 phyvalid = (cdata->chansel >> (phy->id * S3C24XX_CHANSEL_WIDTH)); in s3c24xx_dma_phy_valid()
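The chansel bitfield packs one routing selector per physical channel, and the shift above isolates the selector for this phy before the (unmatched) valid-bit test. A minimal sketch of that decoding, assuming S3C24XX_CHANSEL_WIDTH is 4 and a hypothetical valid flag in bit 3 of each selector:

/* Sketch only: decide whether a slave request can be routed to physical
 * channel "phy_id". The 4-bit selector width and the valid bit in bit 3
 * are assumptions, not confirmed by the matched lines.
 */
static bool chansel_routes_to(u32 chansel, unsigned int phy_id)
{
	u32 sel = chansel >> (phy_id * 4);	/* 4 == S3C24XX_CHANSEL_WIDTH */

	return sel & BIT(3);			/* hypothetical valid bit */
}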
288 struct s3c24xx_dma_engine *s3cdma = s3cchan->host; in s3c24xx_dma_get_phy()
294 for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) { in s3c24xx_dma_get_phy()
295 phy = &s3cdma->phy_chans[i]; in s3c24xx_dma_get_phy()
297 if (!phy->valid) in s3c24xx_dma_get_phy()
303 spin_lock_irqsave(&phy->lock, flags); in s3c24xx_dma_get_phy()
305 if (!phy->serving) { in s3c24xx_dma_get_phy()
306 phy->serving = s3cchan; in s3c24xx_dma_get_phy()
307 spin_unlock_irqrestore(&phy->lock, flags); in s3c24xx_dma_get_phy()
311 spin_unlock_irqrestore(&phy->lock, flags); in s3c24xx_dma_get_phy()
315 if (i == s3cdma->pdata->num_phy_channels) { in s3c24xx_dma_get_phy()
316 dev_warn(&s3cdma->pdev->dev, "no phy channel available\n"); in s3c24xx_dma_get_phy()
321 if (s3cdma->sdata->has_clocks) { in s3c24xx_dma_get_phy()
322 ret = clk_enable(phy->clk); in s3c24xx_dma_get_phy()
324 dev_err(&s3cdma->pdev->dev, "could not enable clock for channel %d, err %d\n", in s3c24xx_dma_get_phy()
325 phy->id, ret); in s3c24xx_dma_get_phy()
326 phy->serving = NULL; in s3c24xx_dma_get_phy()
341 struct s3c24xx_dma_engine *s3cdma = phy->host; in s3c24xx_dma_put_phy()
343 if (s3cdma->sdata->has_clocks) in s3c24xx_dma_put_phy()
344 clk_disable(phy->clk); in s3c24xx_dma_put_phy()
346 phy->serving = NULL; in s3c24xx_dma_put_phy()
351 * This should not be used for an on-going transfer, but as a method of
357 writel(S3C24XX_DMASKTRIG_STOP, phy->base + S3C24XX_DMASKTRIG); in s3c24xx_dma_terminate_phy()
372 struct s3c24xx_dma_phy *phy = s3cchan->phy; in s3c24xx_dma_getbytes_chan()
373 struct s3c24xx_txd *txd = s3cchan->at; in s3c24xx_dma_getbytes_chan()
374 u32 tc = readl(phy->base + S3C24XX_DSTAT) & S3C24XX_DSTAT_CURRTC_MASK; in s3c24xx_dma_getbytes_chan()
376 return tc * txd->width; in s3c24xx_dma_getbytes_chan()
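DSTAT's CURRTC field counts transfer units rather than bytes, so the residue is the unit count scaled by the programmed width: a CURRTC reading of 100 on a transfer set up with a 4-byte width, for example, means 400 bytes are still outstanding.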
387 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || in s3c24xx_dma_set_runtime_config()
388 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) in s3c24xx_dma_set_runtime_config()
389 return -EINVAL; in s3c24xx_dma_set_runtime_config()
391 spin_lock_irqsave(&s3cchan->vc.lock, flags); in s3c24xx_dma_set_runtime_config()
393 if (!s3cchan->slave) { in s3c24xx_dma_set_runtime_config()
394 ret = -EINVAL; in s3c24xx_dma_set_runtime_config()
398 s3cchan->cfg = *config; in s3c24xx_dma_set_runtime_config()
401 spin_unlock_irqrestore(&s3cchan->vc.lock, flags); in s3c24xx_dma_set_runtime_config()
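For reference, a hedged example of a configuration this callback accepts: 1-, 2- and 4-byte bus widths pass the check above, while DMA_SLAVE_BUSWIDTH_8_BYTES is rejected with -EINVAL. The FIFO address below is a placeholder, not a real S3C24XX peripheral register.

struct dma_slave_config cfg = {
	.src_addr	= 0x51000040,	/* placeholder FIFO address */
	.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	.src_maxburst	= 1,
};

ret = dmaengine_slave_config(chan, &cfg);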
420 INIT_LIST_HEAD(&txd->dsg_list); in s3c24xx_dma_get_txd()
421 txd->dcon = S3C24XX_DCON_INT | S3C24XX_DCON_NORELOAD; in s3c24xx_dma_get_txd()
431 list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) { in s3c24xx_dma_free_txd()
432 list_del(&dsg->node); in s3c24xx_dma_free_txd()
442 struct s3c24xx_dma_engine *s3cdma = s3cchan->host; in s3c24xx_dma_start_next_sg()
443 struct s3c24xx_dma_phy *phy = s3cchan->phy; in s3c24xx_dma_start_next_sg()
444 const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata; in s3c24xx_dma_start_next_sg()
445 struct s3c24xx_sg *dsg = list_entry(txd->at, struct s3c24xx_sg, node); in s3c24xx_dma_start_next_sg()
446 u32 dcon = txd->dcon; in s3c24xx_dma_start_next_sg()
449 /* transfer-size and -count from len and width */ in s3c24xx_dma_start_next_sg()
450 switch (txd->width) { in s3c24xx_dma_start_next_sg()
452 dcon |= S3C24XX_DCON_DSZ_BYTE | dsg->len; in s3c24xx_dma_start_next_sg()
455 dcon |= S3C24XX_DCON_DSZ_HALFWORD | (dsg->len / 2); in s3c24xx_dma_start_next_sg()
458 dcon |= S3C24XX_DCON_DSZ_WORD | (dsg->len / 4); in s3c24xx_dma_start_next_sg()
462 if (s3cchan->slave) { in s3c24xx_dma_start_next_sg()
464 &pdata->channels[s3cchan->id]; in s3c24xx_dma_start_next_sg()
466 if (s3cdma->sdata->has_reqsel) { in s3c24xx_dma_start_next_sg()
467 writel_relaxed((cdata->chansel << 1) | in s3c24xx_dma_start_next_sg()
469 phy->base + S3C24XX_DMAREQSEL); in s3c24xx_dma_start_next_sg()
471 int csel = cdata->chansel >> (phy->id * in s3c24xx_dma_start_next_sg()
479 if (s3cdma->sdata->has_reqsel) in s3c24xx_dma_start_next_sg()
480 writel_relaxed(0, phy->base + S3C24XX_DMAREQSEL); in s3c24xx_dma_start_next_sg()
483 writel_relaxed(dsg->src_addr, phy->base + S3C24XX_DISRC); in s3c24xx_dma_start_next_sg()
484 writel_relaxed(txd->disrcc, phy->base + S3C24XX_DISRCC); in s3c24xx_dma_start_next_sg()
485 writel_relaxed(dsg->dst_addr, phy->base + S3C24XX_DIDST); in s3c24xx_dma_start_next_sg()
486 writel_relaxed(txd->didstc, phy->base + S3C24XX_DIDSTC); in s3c24xx_dma_start_next_sg()
487 writel_relaxed(dcon, phy->base + S3C24XX_DCON); in s3c24xx_dma_start_next_sg()
489 val = readl_relaxed(phy->base + S3C24XX_DMASKTRIG); in s3c24xx_dma_start_next_sg()
493 /* trigger the dma operation for memcpy transfers */ in s3c24xx_dma_start_next_sg()
494 if (!s3cchan->slave) in s3c24xx_dma_start_next_sg()
497 writel(val, phy->base + S3C24XX_DMASKTRIG); in s3c24xx_dma_start_next_sg()
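Between the read-back of DMASKTRIG at line 489 and the final write at line 497 the listing skips the mask update. A sketch of the elided step, with the ON and SWTRIG mask names inferred from the STOP mask used elsewhere in this file rather than confirmed by the matched lines:

/* Enable the channel; memcpy channels have no hardware request line,
 * so they must be kicked off by the software trigger bit.
 * S3C24XX_DMASKTRIG_ON and _SWTRIG are assumed names.
 */
val &= ~S3C24XX_DMASKTRIG_STOP;
val |= S3C24XX_DMASKTRIG_ON;
if (!s3cchan->slave)
	val |= S3C24XX_DMASKTRIG_SWTRIG;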
505 struct s3c24xx_dma_phy *phy = s3cchan->phy; in s3c24xx_dma_start_next_txd()
506 struct virt_dma_desc *vd = vchan_next_desc(&s3cchan->vc); in s3c24xx_dma_start_next_txd()
507 struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx); in s3c24xx_dma_start_next_txd()
509 list_del(&txd->vd.node); in s3c24xx_dma_start_next_txd()
511 s3cchan->at = txd; in s3c24xx_dma_start_next_txd()
518 txd->at = txd->dsg_list.next; in s3c24xx_dma_start_next_txd()
529 struct s3c24xx_dma_engine *s3cdma = s3cchan->host; in s3c24xx_dma_phy_alloc_and_start()
534 dev_dbg(&s3cdma->pdev->dev, "no physical channel available for xfer on %s\n", in s3c24xx_dma_phy_alloc_and_start()
535 s3cchan->name); in s3c24xx_dma_phy_alloc_and_start()
536 s3cchan->state = S3C24XX_DMA_CHAN_WAITING; in s3c24xx_dma_phy_alloc_and_start()
540 dev_dbg(&s3cdma->pdev->dev, "allocated physical channel %d for xfer on %s\n", in s3c24xx_dma_phy_alloc_and_start()
541 phy->id, s3cchan->name); in s3c24xx_dma_phy_alloc_and_start()
543 s3cchan->phy = phy; in s3c24xx_dma_phy_alloc_and_start()
544 s3cchan->state = S3C24XX_DMA_CHAN_RUNNING; in s3c24xx_dma_phy_alloc_and_start()
552 struct s3c24xx_dma_engine *s3cdma = s3cchan->host; in s3c24xx_dma_phy_reassign_start()
554 dev_dbg(&s3cdma->pdev->dev, "reassigned physical channel %d for xfer on %s\n", in s3c24xx_dma_phy_reassign_start()
555 phy->id, s3cchan->name); in s3c24xx_dma_phy_reassign_start()
560 * that this will only be called when it _already_ is non-NULL. in s3c24xx_dma_phy_reassign_start()
562 phy->serving = s3cchan; in s3c24xx_dma_phy_reassign_start()
563 s3cchan->phy = phy; in s3c24xx_dma_phy_reassign_start()
564 s3cchan->state = S3C24XX_DMA_CHAN_RUNNING; in s3c24xx_dma_phy_reassign_start()
574 struct s3c24xx_dma_engine *s3cdma = s3cchan->host; in s3c24xx_dma_phy_free()
581 list_for_each_entry(p, &s3cdma->memcpy.channels, vc.chan.device_node) in s3c24xx_dma_phy_free()
582 if (p->state == S3C24XX_DMA_CHAN_WAITING) { in s3c24xx_dma_phy_free()
588 list_for_each_entry(p, &s3cdma->slave.channels, in s3c24xx_dma_phy_free()
590 if (p->state == S3C24XX_DMA_CHAN_WAITING && in s3c24xx_dma_phy_free()
591 s3c24xx_dma_phy_valid(p, s3cchan->phy)) { in s3c24xx_dma_phy_free()
598 s3c24xx_dma_terminate_phy(s3cchan->phy); in s3c24xx_dma_phy_free()
607 spin_lock(&next->vc.lock); in s3c24xx_dma_phy_free()
608 /* Re-check the state now that we have the lock */ in s3c24xx_dma_phy_free()
609 success = next->state == S3C24XX_DMA_CHAN_WAITING; in s3c24xx_dma_phy_free()
611 s3c24xx_dma_phy_reassign_start(s3cchan->phy, next); in s3c24xx_dma_phy_free()
612 spin_unlock(&next->vc.lock); in s3c24xx_dma_phy_free()
619 s3c24xx_dma_put_phy(s3cchan->phy); in s3c24xx_dma_phy_free()
622 s3cchan->phy = NULL; in s3c24xx_dma_phy_free()
623 s3cchan->state = S3C24XX_DMA_CHAN_IDLE; in s3c24xx_dma_phy_free()
628 struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx); in s3c24xx_dma_desc_free()
629 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan); in s3c24xx_dma_desc_free()
631 if (!s3cchan->slave) in s3c24xx_dma_desc_free()
632 dma_descriptor_unmap(&vd->tx); in s3c24xx_dma_desc_free()
640 struct s3c24xx_dma_chan *s3cchan = phy->serving; in s3c24xx_dma_irq()
643 dev_dbg(&phy->host->pdev->dev, "interrupt on channel %d\n", phy->id); in s3c24xx_dma_irq()
653 dev_err(&phy->host->pdev->dev, "interrupt on unused channel %d\n", in s3c24xx_dma_irq()
654 phy->id); in s3c24xx_dma_irq()
661 spin_lock(&s3cchan->vc.lock); in s3c24xx_dma_irq()
662 txd = s3cchan->at; in s3c24xx_dma_irq()
665 if (!list_is_last(txd->at, &txd->dsg_list)) { in s3c24xx_dma_irq()
666 txd->at = txd->at->next; in s3c24xx_dma_irq()
667 if (txd->cyclic) in s3c24xx_dma_irq()
668 vchan_cyclic_callback(&txd->vd); in s3c24xx_dma_irq()
670 } else if (!txd->cyclic) { in s3c24xx_dma_irq()
671 s3cchan->at = NULL; in s3c24xx_dma_irq()
672 vchan_cookie_complete(&txd->vd); in s3c24xx_dma_irq()
678 if (vchan_next_desc(&s3cchan->vc)) in s3c24xx_dma_irq()
683 vchan_cyclic_callback(&txd->vd); in s3c24xx_dma_irq()
686 txd->at = txd->dsg_list.next; in s3c24xx_dma_irq()
690 spin_unlock(&s3cchan->vc.lock); in s3c24xx_dma_irq()
702 struct s3c24xx_dma_engine *s3cdma = s3cchan->host; in s3c24xx_dma_terminate_all()
707 spin_lock_irqsave(&s3cchan->vc.lock, flags); in s3c24xx_dma_terminate_all()
709 if (!s3cchan->phy && !s3cchan->at) { in s3c24xx_dma_terminate_all()
710 dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n", in s3c24xx_dma_terminate_all()
711 s3cchan->id); in s3c24xx_dma_terminate_all()
712 ret = -EINVAL; in s3c24xx_dma_terminate_all()
716 s3cchan->state = S3C24XX_DMA_CHAN_IDLE; in s3c24xx_dma_terminate_all()
719 if (s3cchan->phy) in s3c24xx_dma_terminate_all()
723 if (s3cchan->at) { in s3c24xx_dma_terminate_all()
724 vchan_terminate_vdesc(&s3cchan->at->vd); in s3c24xx_dma_terminate_all()
725 s3cchan->at = NULL; in s3c24xx_dma_terminate_all()
730 vchan_get_all_descriptors(&s3cchan->vc, &head); in s3c24xx_dma_terminate_all()
732 spin_unlock_irqrestore(&s3cchan->vc.lock, flags); in s3c24xx_dma_terminate_all()
734 vchan_dma_desc_free_list(&s3cchan->vc, &head); in s3c24xx_dma_terminate_all()
739 spin_unlock_irqrestore(&s3cchan->vc.lock, flags); in s3c24xx_dma_terminate_all()
748 vchan_synchronize(&s3cchan->vc); in s3c24xx_dma_synchronize()
768 spin_lock_irqsave(&s3cchan->vc.lock, flags); in s3c24xx_dma_tx_status()
776 spin_unlock_irqrestore(&s3cchan->vc.lock, flags); in s3c24xx_dma_tx_status()
780 vd = vchan_find_desc(&s3cchan->vc, cookie); in s3c24xx_dma_tx_status()
783 txd = to_s3c24xx_txd(&vd->tx); in s3c24xx_dma_tx_status()
785 list_for_each_entry(dsg, &txd->dsg_list, node) in s3c24xx_dma_tx_status()
786 bytes += dsg->len; in s3c24xx_dma_tx_status()
792 txd = s3cchan->at; in s3c24xx_dma_tx_status()
794 dsg = list_entry(txd->at, struct s3c24xx_sg, node); in s3c24xx_dma_tx_status()
795 list_for_each_entry_from(dsg, &txd->dsg_list, node) in s3c24xx_dma_tx_status()
796 bytes += dsg->len; in s3c24xx_dma_tx_status()
800 spin_unlock_irqrestore(&s3cchan->vc.lock, flags); in s3c24xx_dma_tx_status()
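The two residue branches differ in what they can know: a descriptor found by vchan_find_desc() has not started, so its residue is simply the sum of all its sg lengths (four 1 KiB segments report 4096 bytes), whereas for the running descriptor the walk starts at the in-flight entry txd->at, and the hardware count from s3c24xx_dma_getbytes_chan() presumably completes the figure in a line not matched here.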
813 * Initialize a descriptor to be used by memcpy submit
820 struct s3c24xx_dma_engine *s3cdma = s3cchan->host; in s3c24xx_dma_prep_memcpy()
825 dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %zu bytes from %s\n", in s3c24xx_dma_prep_memcpy()
826 len, s3cchan->name); in s3c24xx_dma_prep_memcpy()
829 dev_err(&s3cdma->pdev->dev, "memcpy size %zu too large\n", len); in s3c24xx_dma_prep_memcpy()
842 list_add_tail(&dsg->node, &txd->dsg_list); in s3c24xx_dma_prep_memcpy()
844 dsg->src_addr = src; in s3c24xx_dma_prep_memcpy()
845 dsg->dst_addr = dest; in s3c24xx_dma_prep_memcpy()
846 dsg->len = len; in s3c24xx_dma_prep_memcpy()
849 * Determine a suitable transfer width. in s3c24xx_dma_prep_memcpy()
851 * naturally aligned on the bus, i.e., a 4 byte fetch must start at in s3c24xx_dma_prep_memcpy()
852 * an address divisible by 4 - more generally addr % width must be 0. in s3c24xx_dma_prep_memcpy()
858 txd->width = (src_mod == 0 && dest_mod == 0) ? 4 : 1; in s3c24xx_dma_prep_memcpy()
861 txd->width = ((src_mod == 2 || src_mod == 0) && in s3c24xx_dma_prep_memcpy()
865 txd->width = 1; in s3c24xx_dma_prep_memcpy()
869 txd->disrcc = S3C24XX_DISRCC_LOC_AHB | S3C24XX_DISRCC_INC_INCREMENT; in s3c24xx_dma_prep_memcpy()
870 txd->didstc = S3C24XX_DIDSTC_LOC_AHB | S3C24XX_DIDSTC_INC_INCREMENT; in s3c24xx_dma_prep_memcpy()
871 txd->dcon |= S3C24XX_DCON_DEMAND | S3C24XX_DCON_SYNC_HCLK | in s3c24xx_dma_prep_memcpy()
874 return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags); in s3c24xx_dma_prep_memcpy()
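The width-selection fragments at lines 858-865 are arms of a decision on len and the address remainders; a sketch of the full logic they imply, with the switch-on-(len % 4) scaffold inferred rather than matched:

/* Pick the widest unit that both divides len and keeps src and dest
 * naturally aligned (addr % width == 0). Only the arms of this switch
 * appear in the listing above; the scaffold is an inference.
 */
src_mod = src % 4;
dest_mod = dest % 4;
switch (len % 4) {
case 0:		/* whole words, if both ends are word-aligned */
	txd->width = (src_mod == 0 && dest_mod == 0) ? 4 : 1;
	break;
case 2:		/* halfwords, if both ends are 2-byte aligned */
	txd->width = ((src_mod == 2 || src_mod == 0) &&
		      (dest_mod == 2 || dest_mod == 0)) ? 2 : 1;
	break;
default:	/* odd lengths fall back to byte transfers */
	txd->width = 1;
	break;
}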
882 struct s3c24xx_dma_engine *s3cdma = s3cchan->host; in s3c24xx_dma_prep_dma_cyclic()
883 const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata; in s3c24xx_dma_prep_dma_cyclic()
884 struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id]; in s3c24xx_dma_prep_dma_cyclic()
892 dev_dbg(&s3cdma->pdev->dev, in s3c24xx_dma_prep_dma_cyclic()
894 size, period, s3cchan->name); in s3c24xx_dma_prep_dma_cyclic()
897 dev_err(&s3cdma->pdev->dev, in s3c24xx_dma_prep_dma_cyclic()
906 txd->cyclic = 1; in s3c24xx_dma_prep_dma_cyclic()
908 if (cdata->handshake) in s3c24xx_dma_prep_dma_cyclic()
909 txd->dcon |= S3C24XX_DCON_HANDSHAKE; in s3c24xx_dma_prep_dma_cyclic()
911 switch (cdata->bus) { in s3c24xx_dma_prep_dma_cyclic()
913 txd->dcon |= S3C24XX_DCON_SYNC_PCLK; in s3c24xx_dma_prep_dma_cyclic()
917 txd->dcon |= S3C24XX_DCON_SYNC_HCLK; in s3c24xx_dma_prep_dma_cyclic()
932 txd->dcon |= S3C24XX_DCON_SERV_SINGLE; in s3c24xx_dma_prep_dma_cyclic()
935 txd->disrcc = S3C24XX_DISRCC_LOC_AHB | in s3c24xx_dma_prep_dma_cyclic()
937 txd->didstc = hwcfg; in s3c24xx_dma_prep_dma_cyclic()
938 slave_addr = s3cchan->cfg.dst_addr; in s3c24xx_dma_prep_dma_cyclic()
939 txd->width = s3cchan->cfg.dst_addr_width; in s3c24xx_dma_prep_dma_cyclic()
941 txd->disrcc = hwcfg; in s3c24xx_dma_prep_dma_cyclic()
942 txd->didstc = S3C24XX_DIDSTC_LOC_AHB | in s3c24xx_dma_prep_dma_cyclic()
944 slave_addr = s3cchan->cfg.src_addr; in s3c24xx_dma_prep_dma_cyclic()
945 txd->width = s3cchan->cfg.src_addr_width; in s3c24xx_dma_prep_dma_cyclic()
956 list_add_tail(&dsg->node, &txd->dsg_list); in s3c24xx_dma_prep_dma_cyclic()
958 dsg->len = period; in s3c24xx_dma_prep_dma_cyclic()
960 if (i == sg_len - 1) in s3c24xx_dma_prep_dma_cyclic()
961 dsg->len = size - period * i; in s3c24xx_dma_prep_dma_cyclic()
963 dsg->src_addr = addr + period * i; in s3c24xx_dma_prep_dma_cyclic()
964 dsg->dst_addr = slave_addr; in s3c24xx_dma_prep_dma_cyclic()
966 dsg->src_addr = slave_addr; in s3c24xx_dma_prep_dma_cyclic()
967 dsg->dst_addr = addr + period * i; in s3c24xx_dma_prep_dma_cyclic()
971 return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags); in s3c24xx_dma_prep_dma_cyclic()
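Every period therefore spans period bytes except possibly the last, which absorbs the remainder (size - period * i on the final iteration). Assuming sg_len is derived as DIV_ROUND_UP(size, period), which the matched lines do not show, a 10000-byte buffer with a 4096-byte period splits into segments of 4096, 4096 and 1808 bytes.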
980 struct s3c24xx_dma_engine *s3cdma = s3cchan->host; in s3c24xx_dma_prep_slave_sg()
981 const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata; in s3c24xx_dma_prep_slave_sg()
982 struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id]; in s3c24xx_dma_prep_slave_sg()
990 dev_dbg(&s3cdma->pdev->dev, "prepare transaction of %d bytes from %s\n", in s3c24xx_dma_prep_slave_sg()
991 sg_dma_len(sgl), s3cchan->name); in s3c24xx_dma_prep_slave_sg()
997 if (cdata->handshake) in s3c24xx_dma_prep_slave_sg()
998 txd->dcon |= S3C24XX_DCON_HANDSHAKE; in s3c24xx_dma_prep_slave_sg()
1000 switch (cdata->bus) { in s3c24xx_dma_prep_slave_sg()
1002 txd->dcon |= S3C24XX_DCON_SYNC_PCLK; in s3c24xx_dma_prep_slave_sg()
1006 txd->dcon |= S3C24XX_DCON_SYNC_HCLK; in s3c24xx_dma_prep_slave_sg()
1021 txd->dcon |= S3C24XX_DCON_SERV_SINGLE; in s3c24xx_dma_prep_slave_sg()
1024 txd->disrcc = S3C24XX_DISRCC_LOC_AHB | in s3c24xx_dma_prep_slave_sg()
1026 txd->didstc = hwcfg; in s3c24xx_dma_prep_slave_sg()
1027 slave_addr = s3cchan->cfg.dst_addr; in s3c24xx_dma_prep_slave_sg()
1028 txd->width = s3cchan->cfg.dst_addr_width; in s3c24xx_dma_prep_slave_sg()
1030 txd->disrcc = hwcfg; in s3c24xx_dma_prep_slave_sg()
1031 txd->didstc = S3C24XX_DIDSTC_LOC_AHB | in s3c24xx_dma_prep_slave_sg()
1033 slave_addr = s3cchan->cfg.src_addr; in s3c24xx_dma_prep_slave_sg()
1034 txd->width = s3cchan->cfg.src_addr_width; in s3c24xx_dma_prep_slave_sg()
1037 dev_err(&s3cdma->pdev->dev, in s3c24xx_dma_prep_slave_sg()
1048 list_add_tail(&dsg->node, &txd->dsg_list); in s3c24xx_dma_prep_slave_sg()
1050 dsg->len = sg_dma_len(sg); in s3c24xx_dma_prep_slave_sg()
1052 dsg->src_addr = sg_dma_address(sg); in s3c24xx_dma_prep_slave_sg()
1053 dsg->dst_addr = slave_addr; in s3c24xx_dma_prep_slave_sg()
1055 dsg->src_addr = slave_addr; in s3c24xx_dma_prep_slave_sg()
1056 dsg->dst_addr = sg_dma_address(sg); in s3c24xx_dma_prep_slave_sg()
1060 return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags); in s3c24xx_dma_prep_slave_sg()
1072 spin_lock_irqsave(&s3cchan->vc.lock, flags); in s3c24xx_dma_issue_pending()
1073 if (vchan_issue_pending(&s3cchan->vc)) { in s3c24xx_dma_issue_pending()
1074 if (!s3cchan->phy && s3cchan->state != S3C24XX_DMA_CHAN_WAITING) in s3c24xx_dma_issue_pending()
1077 spin_unlock_irqrestore(&s3cchan->vc.lock, flags); in s3c24xx_dma_issue_pending()
1085 * Initialise the DMAC memcpy/slave channels.
1094 INIT_LIST_HEAD(&dmadev->channels); in s3c24xx_dma_init_virtual_channels()
1097 * Register as many memcpy channels as we have physical channels, in s3c24xx_dma_init_virtual_channels()
1102 chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL); in s3c24xx_dma_init_virtual_channels()
1104 return -ENOMEM; in s3c24xx_dma_init_virtual_channels()
1106 chan->id = i; in s3c24xx_dma_init_virtual_channels()
1107 chan->host = s3cdma; in s3c24xx_dma_init_virtual_channels()
1108 chan->state = S3C24XX_DMA_CHAN_IDLE; in s3c24xx_dma_init_virtual_channels()
1111 chan->slave = true; in s3c24xx_dma_init_virtual_channels()
1112 chan->name = kasprintf(GFP_KERNEL, "slave%d", i); in s3c24xx_dma_init_virtual_channels()
1113 if (!chan->name) in s3c24xx_dma_init_virtual_channels()
1114 return -ENOMEM; in s3c24xx_dma_init_virtual_channels()
1116 chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); in s3c24xx_dma_init_virtual_channels()
1117 if (!chan->name) in s3c24xx_dma_init_virtual_channels()
1118 return -ENOMEM; in s3c24xx_dma_init_virtual_channels()
1120 dev_dbg(dmadev->dev, in s3c24xx_dma_init_virtual_channels()
1122 chan->name); in s3c24xx_dma_init_virtual_channels()
1124 chan->vc.desc_free = s3c24xx_dma_desc_free; in s3c24xx_dma_init_virtual_channels()
1125 vchan_init(&chan->vc, dmadev); in s3c24xx_dma_init_virtual_channels()
1127 dev_info(dmadev->dev, "initialized %d virtual %s channels\n", in s3c24xx_dma_init_virtual_channels()
1128 i, slave ? "slave" : "memcpy"); in s3c24xx_dma_init_virtual_channels()
1138 next, &dmadev->channels, vc.chan.device_node) { in s3c24xx_dma_free_virtual_channels()
1139 list_del(&chan->vc.chan.device_node); in s3c24xx_dma_free_virtual_channels()
1140 tasklet_kill(&chan->vc.task); in s3c24xx_dma_free_virtual_channels()
1167 .name = "s3c2410-dma",
1170 .name = "s3c2412-dma",
1173 .name = "s3c2443-dma",
1182 platform_get_device_id(pdev)->driver_data; in s3c24xx_dma_get_soc_data()
1187 const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev); in s3c24xx_dma_probe()
1195 dev_err(&pdev->dev, "platform data missing\n"); in s3c24xx_dma_probe()
1196 return -ENODEV; in s3c24xx_dma_probe()
1200 if (pdata->num_phy_channels > MAX_DMA_CHANNELS) { in s3c24xx_dma_probe()
1201 dev_err(&pdev->dev, "too many dma channels %d, max %d\n", in s3c24xx_dma_probe()
1202 pdata->num_phy_channels, MAX_DMA_CHANNELS); in s3c24xx_dma_probe()
1203 return -EINVAL; in s3c24xx_dma_probe()
1208 return -EINVAL; in s3c24xx_dma_probe()
1210 s3cdma = devm_kzalloc(&pdev->dev, sizeof(*s3cdma), GFP_KERNEL); in s3c24xx_dma_probe()
1212 return -ENOMEM; in s3c24xx_dma_probe()
1214 s3cdma->pdev = pdev; in s3c24xx_dma_probe()
1215 s3cdma->pdata = pdata; in s3c24xx_dma_probe()
1216 s3cdma->sdata = sdata; in s3c24xx_dma_probe()
1219 s3cdma->base = devm_ioremap_resource(&pdev->dev, res); in s3c24xx_dma_probe()
1220 if (IS_ERR(s3cdma->base)) in s3c24xx_dma_probe()
1221 return PTR_ERR(s3cdma->base); in s3c24xx_dma_probe()
1223 s3cdma->phy_chans = devm_kcalloc(&pdev->dev, in s3c24xx_dma_probe()
1224 pdata->num_phy_channels, in s3c24xx_dma_probe()
1227 if (!s3cdma->phy_chans) in s3c24xx_dma_probe()
1228 return -ENOMEM; in s3c24xx_dma_probe()
1231 for (i = 0; i < pdata->num_phy_channels; i++) { in s3c24xx_dma_probe()
1232 struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i]; in s3c24xx_dma_probe()
1235 phy->id = i; in s3c24xx_dma_probe()
1236 phy->base = s3cdma->base + (i * sdata->stride); in s3c24xx_dma_probe()
1237 phy->host = s3cdma; in s3c24xx_dma_probe()
1239 phy->irq = platform_get_irq(pdev, i); in s3c24xx_dma_probe()
1240 if (phy->irq < 0) in s3c24xx_dma_probe()
1243 ret = devm_request_irq(&pdev->dev, phy->irq, s3c24xx_dma_irq, in s3c24xx_dma_probe()
1244 0, pdev->name, phy); in s3c24xx_dma_probe()
1246 dev_err(&pdev->dev, "Unable to request irq for channel %d, error %d\n", in s3c24xx_dma_probe()
1251 if (sdata->has_clocks) { in s3c24xx_dma_probe()
1253 phy->clk = devm_clk_get(&pdev->dev, clk_name); in s3c24xx_dma_probe()
1254 if (IS_ERR(phy->clk)) { in s3c24xx_dma_probe()
1255 dev_err(&pdev->dev, "unable to acquire clock for channel %d, error %lu\n", in s3c24xx_dma_probe()
1256 i, PTR_ERR(phy->clk)); in s3c24xx_dma_probe()
1260 ret = clk_prepare(phy->clk); in s3c24xx_dma_probe()
1262 dev_err(&pdev->dev, "clock for phy %d failed, error %d\n", in s3c24xx_dma_probe()
1268 spin_lock_init(&phy->lock); in s3c24xx_dma_probe()
1269 phy->valid = true; in s3c24xx_dma_probe()
1271 dev_dbg(&pdev->dev, "physical channel %d is %s\n", in s3c24xx_dma_probe()
1275 /* Initialize memcpy engine */ in s3c24xx_dma_probe()
1276 dma_cap_set(DMA_MEMCPY, s3cdma->memcpy.cap_mask); in s3c24xx_dma_probe()
1277 dma_cap_set(DMA_PRIVATE, s3cdma->memcpy.cap_mask); in s3c24xx_dma_probe()
1278 s3cdma->memcpy.dev = &pdev->dev; in s3c24xx_dma_probe()
1279 s3cdma->memcpy.device_free_chan_resources = in s3c24xx_dma_probe()
1281 s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy; in s3c24xx_dma_probe()
1282 s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status; in s3c24xx_dma_probe()
1283 s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending; in s3c24xx_dma_probe()
1284 s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config; in s3c24xx_dma_probe()
1285 s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all; in s3c24xx_dma_probe()
1286 s3cdma->memcpy.device_synchronize = s3c24xx_dma_synchronize; in s3c24xx_dma_probe()
1289 dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask); in s3c24xx_dma_probe()
1290 dma_cap_set(DMA_CYCLIC, s3cdma->slave.cap_mask); in s3c24xx_dma_probe()
1291 dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask); in s3c24xx_dma_probe()
1292 s3cdma->slave.dev = &pdev->dev; in s3c24xx_dma_probe()
1293 s3cdma->slave.device_free_chan_resources = in s3c24xx_dma_probe()
1295 s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status; in s3c24xx_dma_probe()
1296 s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending; in s3c24xx_dma_probe()
1297 s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg; in s3c24xx_dma_probe()
1298 s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic; in s3c24xx_dma_probe()
1299 s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config; in s3c24xx_dma_probe()
1300 s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all; in s3c24xx_dma_probe()
1301 s3cdma->slave.device_synchronize = s3c24xx_dma_synchronize; in s3c24xx_dma_probe()
1302 s3cdma->slave.filter.map = pdata->slave_map; in s3c24xx_dma_probe()
1303 s3cdma->slave.filter.mapcnt = pdata->slavecnt; in s3c24xx_dma_probe()
1304 s3cdma->slave.filter.fn = s3c24xx_dma_filter; in s3c24xx_dma_probe()
1306 /* Register as many memcpy channels as there are physical channels */ in s3c24xx_dma_probe()
1307 ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy, in s3c24xx_dma_probe()
1308 pdata->num_phy_channels, false); in s3c24xx_dma_probe()
1310 dev_warn(&pdev->dev, in s3c24xx_dma_probe()
1311 "%s failed to enumerate memcpy channels - %d\n", in s3c24xx_dma_probe()
1317 ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->slave, in s3c24xx_dma_probe()
1318 pdata->num_channels, true); in s3c24xx_dma_probe()
1320 dev_warn(&pdev->dev, in s3c24xx_dma_probe()
1321 "%s failed to enumerate slave channels - %d\n", in s3c24xx_dma_probe()
1326 ret = dma_async_device_register(&s3cdma->memcpy); in s3c24xx_dma_probe()
1328 dev_warn(&pdev->dev, in s3c24xx_dma_probe()
1329 "%s failed to register memcpy as an async device - %d\n", in s3c24xx_dma_probe()
1334 ret = dma_async_device_register(&s3cdma->slave); in s3c24xx_dma_probe()
1336 dev_warn(&pdev->dev, in s3c24xx_dma_probe()
1337 "%s failed to register slave as an async device - %d\n", in s3c24xx_dma_probe()
1343 dev_info(&pdev->dev, "Loaded dma driver with %d physical channels\n", in s3c24xx_dma_probe()
1344 pdata->num_phy_channels); in s3c24xx_dma_probe()
1349 dma_async_device_unregister(&s3cdma->memcpy); in s3c24xx_dma_probe()
1351 s3c24xx_dma_free_virtual_channels(&s3cdma->slave); in s3c24xx_dma_probe()
1353 s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy); in s3c24xx_dma_probe()
1355 if (sdata->has_clocks) in s3c24xx_dma_probe()
1356 for (i = 0; i < pdata->num_phy_channels; i++) { in s3c24xx_dma_probe()
1357 struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i]; in s3c24xx_dma_probe()
1358 if (phy->valid) in s3c24xx_dma_probe()
1359 clk_unprepare(phy->clk); in s3c24xx_dma_probe()
1370 for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) { in s3c24xx_dma_free_irq()
1371 struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i]; in s3c24xx_dma_free_irq()
1373 devm_free_irq(&pdev->dev, phy->irq, phy); in s3c24xx_dma_free_irq()
1379 const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev); in s3c24xx_dma_remove()
1384 dma_async_device_unregister(&s3cdma->slave); in s3c24xx_dma_remove()
1385 dma_async_device_unregister(&s3cdma->memcpy); in s3c24xx_dma_remove()
1389 s3c24xx_dma_free_virtual_channels(&s3cdma->slave); in s3c24xx_dma_remove()
1390 s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy); in s3c24xx_dma_remove()
1392 if (sdata->has_clocks) in s3c24xx_dma_remove()
1393 for (i = 0; i < pdata->num_phy_channels; i++) { in s3c24xx_dma_remove()
1394 struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i]; in s3c24xx_dma_remove()
1395 if (phy->valid) in s3c24xx_dma_remove()
1396 clk_unprepare(phy->clk); in s3c24xx_dma_remove()
1404 .name = "s3c24xx-dma",
1417 if (chan->device->dev->driver != &s3c24xx_dma_driver.driver) in s3c24xx_dma_filter()
1422 return s3cchan->id == (uintptr_t)param; in s3c24xx_dma_filter()
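Because the filter simply compares the channel id against the opaque param, a consumer can claim a specific channel through the generic dmaengine request path. A minimal usage sketch; the channel number 3 is illustrative:

dma_cap_mask_t mask;
struct dma_chan *chan;

dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
/* returns NULL if no channel with this id can be allocated */
chan = dma_request_channel(mask, s3c24xx_dma_filter, (void *)(uintptr_t)3);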