Lines matching +full:dma +full:-channel +full:-mask in drivers/dma/at_xdmac.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
11 #include <dt-bindings/dma/at91.h>
36 #define AT_XDMAC_GIM 0x14 /* Global Interrupt Mask Register */
38 #define AT_XDMAC_GE 0x1C /* Global Channel Enable Register */
39 #define AT_XDMAC_GD 0x20 /* Global Channel Disable Register */
40 #define AT_XDMAC_GS 0x24 /* Global Channel Status Register */
41 #define AT_XDMAC_GRS 0x28 /* Global Channel Read Suspend Register */
43 #define AT_XDMAC_GRWS 0x30 /* Global Channel Read Write Suspend Register */
44 #define AT_XDMAC_GRWR 0x34 /* Global Channel Read Write Resume Register */
45 #define AT_XDMAC_GSWR 0x38 /* Global Channel Software Request Register */
46 #define AT_XDMAC_GSWS 0x3C /* Global Channel Software Request Status Register */
47 #define AT_XDMAC_GSWF 0x40 /* Global Channel Software Flush Request Register */
50 /* Channel relative registers offsets */
51 #define AT_XDMAC_CIE 0x00 /* Channel Interrupt Enable Register */
59 #define AT_XDMAC_CID 0x04 /* Channel Interrupt Disable Register */
67 #define AT_XDMAC_CIM 0x08 /* Channel Interrupt Mask Register */
68 #define AT_XDMAC_CIM_BIM BIT(0) /* End of Block Interrupt Mask Bit */
69 #define AT_XDMAC_CIM_LIM BIT(1) /* End of Linked List Interrupt Mask Bit */
70 #define AT_XDMAC_CIM_DIM BIT(2) /* End of Disable Interrupt Mask Bit */
71 #define AT_XDMAC_CIM_FIM BIT(3) /* End of Flush Interrupt Mask Bit */
72 #define AT_XDMAC_CIM_RBEIM BIT(4) /* Read Bus Error Interrupt Mask Bit */
73 #define AT_XDMAC_CIM_WBEIM BIT(5) /* Write Bus Error Interrupt Mask Bit */
74 #define AT_XDMAC_CIM_ROIM BIT(6) /* Request Overflow Interrupt Mask Bit */
75 #define AT_XDMAC_CIS 0x0C /* Channel Interrupt Status Register */
83 #define AT_XDMAC_CSA 0x10 /* Channel Source Address Register */
84 #define AT_XDMAC_CDA 0x14 /* Channel Destination Address Register */
85 #define AT_XDMAC_CNDA 0x18 /* Channel Next Descriptor Address Register */
86 #define AT_XDMAC_CNDA_NDAIF(i) ((i) & 0x1) /* Channel x Next Descriptor Interface */
87 #define AT_XDMAC_CNDA_NDA(i) ((i) & 0xfffffffc) /* Channel x Next Descriptor Address */
88 #define AT_XDMAC_CNDC 0x1C /* Channel Next Descriptor Control Register */
89 #define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */
90 #define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */
91 #define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */
93 #define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */
94 #define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */
95 #define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */
96 #define AT_XDMAC_CNDC_NDVIEW_NDV3 (0x3 << 3) /* Channel x Next Descriptor View 3 */
97 #define AT_XDMAC_CUBC 0x20 /* Channel Microblock Control Register */
98 #define AT_XDMAC_CBC 0x24 /* Channel Block Control Register */
99 #define AT_XDMAC_CC 0x28 /* Channel Configuration Register */
100 #define AT_XDMAC_CC_TYPE (0x1 << 0) /* Channel Transfer Type */
108 #define AT_XDMAC_CC_DSYNC (0x1 << 4) /* Channel Synchronization */
111 #define AT_XDMAC_CC_PROT (0x1 << 5) /* Channel Protection */
114 #define AT_XDMAC_CC_SWREQ (0x1 << 6) /* Channel Software Request Trigger */
117 #define AT_XDMAC_CC_MEMSET (0x1 << 7) /* Channel Fill Block of memory */
120 #define AT_XDMAC_CC_CSIZE(i) ((0x7 & (i)) << 8) /* Channel Chunk Size */
123 #define AT_XDMAC_CC_DWIDTH(i) ((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET) /* Channel Data Width */
128 #define AT_XDMAC_CC_SIF(i) ((0x1 & (i)) << 13) /* Channel Source Interface Identifier */
129 #define AT_XDMAC_CC_DIF(i) ((0x1 & (i)) << 14) /* Channel Destination Interface Identifier */
130 #define AT_XDMAC_CC_SAM_MASK (0x3 << 16) /* Channel Source Addressing Mode */
135 #define AT_XDMAC_CC_DAM_MASK (0x3 << 18) /* Channel Destination Addressing Mode */
140 #define AT_XDMAC_CC_INITD (0x1 << 21) /* Channel Initialization Terminated (read only) */
149 #define AT_XDMAC_CC_PERID(i) ((0x7f & (i)) << 24) /* Channel Peripheral Identifier */
150 #define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */
151 #define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */
152 #define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */
154 #define AT_XDMAC_CHAN_REG_BASE 0x50 /* Channel registers base address */
183 /* ----- Channels ----- */
187 u32 mask; /* Channel Mask */ member
188 u32 cfg; /* Channel Configuration Register */
208 /* ----- Controller ----- */
210 struct dma_device dma; member
220 /* ----- Descriptors ----- */
235 /* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
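The NDVIEW codes above select how much of each linked-list descriptor the controller fetches. Below is a sketch of the in-memory descriptor, modeled on this driver's at_xdmac_lld layout; the per-view field cutoffs are summarized from the defines above and should be treated as an assumption, not an authoritative register map.

    /* Hardware linked-list descriptor as this driver lays it out. A
     * view-N fetch loads a prefix of this structure: NDV1 stops after
     * mbr_da, NDV2 also loads mbr_cfg, NDV3 loads everything (block
     * control and strides). Sketch only. */
    struct at_xdmac_lld {
        dma_addr_t mbr_nda;  /* next descriptor address, see AT_XDMAC_CNDA_NDA() */
        u32        mbr_ubc;  /* NDE/NDVIEW flags + microblock length */
        dma_addr_t mbr_sa;   /* source address */
        dma_addr_t mbr_da;   /* destination address */
        u32        mbr_cfg;  /* channel configuration (view 2 and up) */
        u32        mbr_bc;   /* block control (view 3) */
        u32        mbr_ds;   /* data stride (view 3) */
        u32        mbr_sus;  /* source microblock stride (view 3) */
        u32        mbr_dus;  /* destination microblock stride (view 3) */
    };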
250 return atxdmac->regs + (AT_XDMAC_CHAN_REG_BASE + chan_nb * 0x40); in at_xdmac_chan_reg_base()
253 #define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
255 writel_relaxed((value), (atxdmac)->regs + (reg))
257 #define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
258 #define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))
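Putting AT_XDMAC_CHAN_REG_BASE (0x50) and the 0x40 per-channel stride together, a per-channel register address is a plain offset computation; a minimal sketch of what at_xdmac_chan_reg_base() plus the accessors above resolve to:

    /* Channel chan, register reg: global base + 0x50 + chan * 0x40 + reg.
     * Example: channel 2's AT_XDMAC_CC (0x28) sits at base + 0xf8. */
    static void __iomem *chan_reg(void __iomem *base, unsigned int chan, u32 reg)
    {
        return base + AT_XDMAC_CHAN_REG_BASE + chan * 0x40 + reg;
    }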
267 return &chan->dev->device; in chan2dev()
272 return container_of(ddev, struct at_xdmac, dma); in to_at_xdmac()
282 return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); in at_xdmac_chan_is_cyclic()
287 return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); in at_xdmac_chan_is_paused()
294 csize = ffs(maxburst) - 1; in at_xdmac_csize()
296 csize = -EINVAL; in at_xdmac_csize()
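ffs() returns the 1-based position of the lowest set bit, so for the power-of-two bursts the hardware supports this maps maxburst 1, 2, 4, 8, 16 to chunk-size codes 0 through 4 (and maxburst 0 to the -EINVAL path). A worked example:

    u32 cc = AT_XDMAC_CC_CSIZE(ffs(8) - 1); /* ffs(8) = 4 -> code 3 -> cc = 0x300, 8-data chunks */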
314 "initial descriptors per channel (default: 64)");
319 return at_xdmac_read(to_at_xdmac(atchan->chan.device), AT_XDMAC_GS) & atchan->mask; in at_xdmac_chan_is_enabled()
324 at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L); in at_xdmac_off()
330 at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L); in at_xdmac_off()
337 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_start_xfer()
340 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first); in at_xdmac_start_xfer()
343 first->active_xfer = true; in at_xdmac_start_xfer()
346 reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys) in at_xdmac_start_xfer()
347 | AT_XDMAC_CNDA_NDAIF(atchan->memif); in at_xdmac_start_xfer()
357 else if ((first->lld.mbr_ubc & in at_xdmac_start_xfer()
365 * properly. This bit can be modified only by using the channel in at_xdmac_start_xfer()
368 at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg); in at_xdmac_start_xfer()
375 dev_vdbg(chan2dev(&atchan->chan), in at_xdmac_start_xfer()
389 if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg)) in at_xdmac_start_xfer()
393 * There is no end of list when doing cyclic dma, we need to get in at_xdmac_start_xfer()
402 at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask); in at_xdmac_start_xfer()
403 dev_vdbg(chan2dev(&atchan->chan), in at_xdmac_start_xfer()
404 "%s: enable channel (0x%08x)\n", __func__, atchan->mask); in at_xdmac_start_xfer()
406 at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask); in at_xdmac_start_xfer()
408 dev_vdbg(chan2dev(&atchan->chan), in at_xdmac_start_xfer()
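Condensed, the start sequence in at_xdmac_start_xfer() is: point CNDA at the first descriptor, select the descriptor view and enable fetching in CNDC, unmask the channel's interrupts in GIE, then set the channel's bit in GE. A minimal sketch, with error handling, view selection and the cyclic/peripheral interrupt choices elided:

    at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);  /* reg = NDA | NDAIF, built above */
    at_xdmac_chan_write(atchan, AT_XDMAC_CNDC,
                        AT_XDMAC_CNDC_NDVIEW_NDV2 | AT_XDMAC_CNDC_NDE);
    at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);  /* unmask this channel */
    at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);   /* enable: transfer starts */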
422 struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan); in at_xdmac_tx_submit()
426 spin_lock_irqsave(&atchan->lock, irqflags); in at_xdmac_tx_submit()
429 list_add_tail(&desc->xfer_node, &atchan->xfers_list); in at_xdmac_tx_submit()
430 spin_unlock_irqrestore(&atchan->lock, irqflags); in at_xdmac_tx_submit()
432 dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n", in at_xdmac_tx_submit()
442 struct at_xdmac *atxdmac = to_at_xdmac(chan->device); in at_xdmac_alloc_desc()
445 desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys); in at_xdmac_alloc_desc()
447 INIT_LIST_HEAD(&desc->descs_list); in at_xdmac_alloc_desc()
448 dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan); in at_xdmac_alloc_desc()
449 desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit; in at_xdmac_alloc_desc()
450 desc->tx_dma_desc.phys = phys; in at_xdmac_alloc_desc()
458 memset(&desc->lld, 0, sizeof(desc->lld)); in at_xdmac_init_used_desc()
459 INIT_LIST_HEAD(&desc->descs_list); in at_xdmac_init_used_desc()
460 desc->direction = DMA_TRANS_NONE; in at_xdmac_init_used_desc()
461 desc->xfer_size = 0; in at_xdmac_init_used_desc()
462 desc->active_xfer = false; in at_xdmac_init_used_desc()
470 if (list_empty(&atchan->free_descs_list)) { in at_xdmac_get_desc()
471 desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT); in at_xdmac_get_desc()
473 desc = list_first_entry(&atchan->free_descs_list, in at_xdmac_get_desc()
475 list_del(&desc->desc_node); in at_xdmac_get_desc()
489 prev->lld.mbr_nda = desc->tx_dma_desc.phys; in at_xdmac_queue_desc()
490 prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE; in at_xdmac_queue_desc()
493 __func__, prev, &prev->lld.mbr_nda); in at_xdmac_queue_desc()
502 desc->lld.mbr_bc++; in at_xdmac_increment_block_count()
512 struct at_xdmac *atxdmac = of_dma->of_dma_data; in at_xdmac_xlate()
515 struct device *dev = atxdmac->dma.dev; in at_xdmac_xlate()
517 if (dma_spec->args_count != 1) { in at_xdmac_xlate()
518 dev_err(dev, "dma phandle args: bad number of args\n"); in at_xdmac_xlate()
522 chan = dma_get_any_slave_channel(&atxdmac->dma); in at_xdmac_xlate()
524 dev_err(dev, "can't get a dma channel\n"); in at_xdmac_xlate()
529 atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]); in at_xdmac_xlate()
530 atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]); in at_xdmac_xlate()
531 atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]); in at_xdmac_xlate()
533 atchan->memif, atchan->perif, atchan->perid); in at_xdmac_xlate()
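The single phandle argument packs the memory interface, peripheral interface and peripheral ID into one cell; the AT91_XDMAC_DT_*() helpers come from the dt-bindings header included above. A hypothetical client cell (the interface and request-line values are made up for illustration):

    /* dmas = <&xdmac0 (AT91_XDMAC_DT_MEM_IF(0) |     memory on IF0
     *                  AT91_XDMAC_DT_PER_IF(1) |     peripheral on IF1
     *                  AT91_XDMAC_DT_PERID(11))>;    request line 11
     * at_xdmac_xlate() unpacks the cell with the matching GET macros. */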
545 atchan->cfg = in at_xdmac_compute_chan_conf()
546 AT91_XDMAC_DT_PERID(atchan->perid) in at_xdmac_compute_chan_conf()
549 | AT_XDMAC_CC_DIF(atchan->memif) in at_xdmac_compute_chan_conf()
550 | AT_XDMAC_CC_SIF(atchan->perif) in at_xdmac_compute_chan_conf()
555 csize = ffs(atchan->sconfig.src_maxburst) - 1; in at_xdmac_compute_chan_conf()
558 return -EINVAL; in at_xdmac_compute_chan_conf()
560 atchan->cfg |= AT_XDMAC_CC_CSIZE(csize); in at_xdmac_compute_chan_conf()
561 dwidth = ffs(atchan->sconfig.src_addr_width) - 1; in at_xdmac_compute_chan_conf()
564 return -EINVAL; in at_xdmac_compute_chan_conf()
566 atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth); in at_xdmac_compute_chan_conf()
568 atchan->cfg = in at_xdmac_compute_chan_conf()
569 AT91_XDMAC_DT_PERID(atchan->perid) in at_xdmac_compute_chan_conf()
572 | AT_XDMAC_CC_DIF(atchan->perif) in at_xdmac_compute_chan_conf()
573 | AT_XDMAC_CC_SIF(atchan->memif) in at_xdmac_compute_chan_conf()
578 csize = ffs(atchan->sconfig.dst_maxburst) - 1; in at_xdmac_compute_chan_conf()
581 return -EINVAL; in at_xdmac_compute_chan_conf()
583 atchan->cfg |= AT_XDMAC_CC_CSIZE(csize); in at_xdmac_compute_chan_conf()
584 dwidth = ffs(atchan->sconfig.dst_addr_width) - 1; in at_xdmac_compute_chan_conf()
587 return -EINVAL; in at_xdmac_compute_chan_conf()
589 atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth); in at_xdmac_compute_chan_conf()
592 dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg); in at_xdmac_compute_chan_conf()
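As a worked example of the DEV_TO_MEM branch above (values are illustrative, and the transfer-type/synchronization bits the driver also ORs in are elided):

    /* perid 11, src_maxburst 8 (csize 3), 32-bit src width (dwidth 2),
     * peripheral on IF1, memory on IF0:
     *   cfg = AT91_XDMAC_DT_PERID(11)    perid in the high bits
     *       | AT_XDMAC_CC_SIF(1)         source = peripheral bus
     *       | AT_XDMAC_CC_DIF(0)         destination = memory bus
     *       | AT_XDMAC_CC_CSIZE(3)       0x300: chunks of 8 data
     *       | AT_XDMAC_CC_DWIDTH(2);     0x1000: 32-bit data width
     */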
604 if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE) in at_xdmac_check_slave_config()
605 || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE)) in at_xdmac_check_slave_config()
606 return -EINVAL; in at_xdmac_check_slave_config()
608 if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH) in at_xdmac_check_slave_config()
609 || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH)) in at_xdmac_check_slave_config()
610 return -EINVAL; in at_xdmac_check_slave_config()
622 return -EINVAL; in at_xdmac_set_slave_config()
625 memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig)); in at_xdmac_set_slave_config()
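A minimal, hypothetical client configuration that passes at_xdmac_check_slave_config(); the FIFO address and the channel variable are assumptions, not taken from this driver:

    struct dma_slave_config cfg = {
        .direction      = DMA_DEV_TO_MEM,
        .src_addr       = 0xf8020000,                 /* assumed peripheral FIFO */
        .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, /* dwidth code 2 */
        .src_maxburst   = 8,                          /* csize code 3 */
    };
    int ret = dmaengine_slave_config(chan, &cfg);     /* chan from dma_request_chan() */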
647 dev_err(chan2dev(chan), "invalid DMA direction\n"); in at_xdmac_prep_slave_sg()
657 spin_lock_irqsave(&atchan->lock, irqflags); in at_xdmac_prep_slave_sg()
680 list_splice_init(&first->descs_list, &atchan->free_descs_list); in at_xdmac_prep_slave_sg()
686 desc->lld.mbr_sa = atchan->sconfig.src_addr; in at_xdmac_prep_slave_sg()
687 desc->lld.mbr_da = mem; in at_xdmac_prep_slave_sg()
689 desc->lld.mbr_sa = mem; in at_xdmac_prep_slave_sg()
690 desc->lld.mbr_da = atchan->sconfig.dst_addr; in at_xdmac_prep_slave_sg()
692 dwidth = at_xdmac_get_dwidth(atchan->cfg); in at_xdmac_prep_slave_sg()
696 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */ in at_xdmac_prep_slave_sg()
700 desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) | in at_xdmac_prep_slave_sg()
704 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); in at_xdmac_prep_slave_sg()
716 list_add_tail(&desc->desc_node, &first->descs_list); in at_xdmac_prep_slave_sg()
721 first->tx_dma_desc.flags = flags; in at_xdmac_prep_slave_sg()
722 first->xfer_size = xfer_size; in at_xdmac_prep_slave_sg()
723 first->direction = direction; in at_xdmac_prep_slave_sg()
724 ret = &first->tx_dma_desc; in at_xdmac_prep_slave_sg()
727 spin_unlock_irqrestore(&atchan->lock, irqflags); in at_xdmac_prep_slave_sg()
748 dev_err(chan2dev(chan), "invalid DMA direction\n"); in at_xdmac_prep_dma_cyclic()
752 if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) { in at_xdmac_prep_dma_cyclic()
753 dev_err(chan2dev(chan), "channel currently used\n"); in at_xdmac_prep_dma_cyclic()
763 spin_lock_irqsave(&atchan->lock, irqflags); in at_xdmac_prep_dma_cyclic()
768 list_splice_init(&first->descs_list, &atchan->free_descs_list); in at_xdmac_prep_dma_cyclic()
769 spin_unlock_irqrestore(&atchan->lock, irqflags); in at_xdmac_prep_dma_cyclic()
772 spin_unlock_irqrestore(&atchan->lock, irqflags); in at_xdmac_prep_dma_cyclic()
775 __func__, desc, &desc->tx_dma_desc.phys); in at_xdmac_prep_dma_cyclic()
778 desc->lld.mbr_sa = atchan->sconfig.src_addr; in at_xdmac_prep_dma_cyclic()
779 desc->lld.mbr_da = buf_addr + i * period_len; in at_xdmac_prep_dma_cyclic()
781 desc->lld.mbr_sa = buf_addr + i * period_len; in at_xdmac_prep_dma_cyclic()
782 desc->lld.mbr_da = atchan->sconfig.dst_addr; in at_xdmac_prep_dma_cyclic()
784 desc->lld.mbr_cfg = atchan->cfg; in at_xdmac_prep_dma_cyclic()
785 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 in at_xdmac_prep_dma_cyclic()
788 | period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg); in at_xdmac_prep_dma_cyclic()
792 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); in at_xdmac_prep_dma_cyclic()
804 list_add_tail(&desc->desc_node, &first->descs_list); in at_xdmac_prep_dma_cyclic()
808 first->tx_dma_desc.flags = flags; in at_xdmac_prep_dma_cyclic()
809 first->xfer_size = buf_len; in at_xdmac_prep_dma_cyclic()
810 first->direction = direction; in at_xdmac_prep_dma_cyclic()
812 return &first->tx_dma_desc; in at_xdmac_prep_dma_cyclic()
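For cyclic transfers there is no end of list (see the comment in at_xdmac_start_xfer() above): the driver emits one NDV1 descriptor per period and closes the ring by pointing the last descriptor back at the first. The period arithmetic, as a sketch:

    /* buf_len is split into buf_len / period_len descriptors; descriptor i
     * covers [buf_addr + i * period_len, buf_addr + (i + 1) * period_len)
     * with microblock length period_len >> dwidth. The final mbr_nda
     * points back at the first descriptor, so the controller loops until
     * the channel is disabled. */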
857 * WARNING: The channel configuration is set here since there is no in at_xdmac_interleaved_queue_desc()
865 * match the one of another channel. If not, it could lead to spurious in at_xdmac_interleaved_queue_desc()
874 dwidth = at_xdmac_align_width(chan, src | dst | chunk->size); in at_xdmac_interleaved_queue_desc()
875 if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) { in at_xdmac_interleaved_queue_desc()
878 __func__, chunk->size, in at_xdmac_interleaved_queue_desc()
887 if (xt->src_inc) { in at_xdmac_interleaved_queue_desc()
888 if (xt->src_sgl) in at_xdmac_interleaved_queue_desc()
894 if (xt->dst_inc) { in at_xdmac_interleaved_queue_desc()
895 if (xt->dst_sgl) in at_xdmac_interleaved_queue_desc()
901 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_interleaved_queue_desc()
903 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_interleaved_queue_desc()
911 ublen = chunk->size >> dwidth; in at_xdmac_interleaved_queue_desc()
913 desc->lld.mbr_sa = src; in at_xdmac_interleaved_queue_desc()
914 desc->lld.mbr_da = dst; in at_xdmac_interleaved_queue_desc()
915 desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk); in at_xdmac_interleaved_queue_desc()
916 desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk); in at_xdmac_interleaved_queue_desc()
918 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3 in at_xdmac_interleaved_queue_desc()
922 desc->lld.mbr_cfg = chan_cc; in at_xdmac_interleaved_queue_desc()
926 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, in at_xdmac_interleaved_queue_desc()
927 desc->lld.mbr_ubc, desc->lld.mbr_cfg); in at_xdmac_interleaved_queue_desc()
948 if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM)) in at_xdmac_prep_interleaved()
955 if ((xt->numf > 1) && (xt->frame_size > 1)) in at_xdmac_prep_interleaved()
959 __func__, &xt->src_start, &xt->dst_start, xt->numf, in at_xdmac_prep_interleaved()
960 xt->frame_size, flags); in at_xdmac_prep_interleaved()
962 src_addr = xt->src_start; in at_xdmac_prep_interleaved()
963 dst_addr = xt->dst_start; in at_xdmac_prep_interleaved()
965 if (xt->numf > 1) { in at_xdmac_prep_interleaved()
969 xt, xt->sgl); in at_xdmac_prep_interleaved()
972 for (i = 0; i < xt->numf - 1; i++) in at_xdmac_prep_interleaved()
977 list_add_tail(&first->desc_node, &first->descs_list); in at_xdmac_prep_interleaved()
979 for (i = 0; i < xt->frame_size; i++) { in at_xdmac_prep_interleaved()
983 chunk = xt->sgl + i; in at_xdmac_prep_interleaved()
988 src_skip = chunk->size + src_icg; in at_xdmac_prep_interleaved()
989 dst_skip = chunk->size + dst_icg; in at_xdmac_prep_interleaved()
993 __func__, chunk->size, src_icg, dst_icg); in at_xdmac_prep_interleaved()
1000 list_splice_init(&first->descs_list, in at_xdmac_prep_interleaved()
1001 &atchan->free_descs_list); in at_xdmac_prep_interleaved()
1010 list_add_tail(&desc->desc_node, &first->descs_list); in at_xdmac_prep_interleaved()
1012 if (xt->src_sgl) in at_xdmac_prep_interleaved()
1015 if (xt->dst_sgl) in at_xdmac_prep_interleaved()
1018 len += chunk->size; in at_xdmac_prep_interleaved()
1023 first->tx_dma_desc.cookie = -EBUSY; in at_xdmac_prep_interleaved()
1024 first->tx_dma_desc.flags = flags; in at_xdmac_prep_interleaved()
1025 first->xfer_size = len; in at_xdmac_prep_interleaved()
1027 return &first->tx_dma_desc; in at_xdmac_prep_interleaved()
1047 * match the one of another channel. If not, it could lead to spurious in at_xdmac_prep_dma_memcpy()
1073 spin_lock_irqsave(&atchan->lock, irqflags); in at_xdmac_prep_dma_memcpy()
1075 spin_unlock_irqrestore(&atchan->lock, irqflags); in at_xdmac_prep_dma_memcpy()
1079 list_splice_init(&first->descs_list, &atchan->free_descs_list); in at_xdmac_prep_dma_memcpy()
1101 remaining_size -= xfer_size; in at_xdmac_prep_dma_memcpy()
1103 desc->lld.mbr_sa = src_addr; in at_xdmac_prep_dma_memcpy()
1104 desc->lld.mbr_da = dst_addr; in at_xdmac_prep_dma_memcpy()
1105 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 in at_xdmac_prep_dma_memcpy()
1109 desc->lld.mbr_cfg = chan_cc; in at_xdmac_prep_dma_memcpy()
1113 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg); in at_xdmac_prep_dma_memcpy()
1125 list_add_tail(&desc->desc_node, &first->descs_list); in at_xdmac_prep_dma_memcpy()
1128 first->tx_dma_desc.flags = flags; in at_xdmac_prep_dma_memcpy()
1129 first->xfer_size = len; in at_xdmac_prep_dma_memcpy()
1131 return &first->tx_dma_desc; in at_xdmac_prep_dma_memcpy()
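Copies longer than one microblock are split into chunks of at most AT_XDMAC_MBR_UBC_UBLEN_MAX data items (0xffffff in this driver); the chunking arithmetic, as a sketch:

    /* At dwidth 2 (32-bit items) one microblock covers up to
     * 0xffffff << 2 bytes, i.e. just under 64 MiB; larger requests loop:
     *   xfer_size = min(remaining_size, AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth);
     *   remaining_size -= xfer_size;
     *   ublen = xfer_size >> dwidth;    stored in mbr_ubc
     */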
1145 * WARNING: The channel configuration is set here since there is no in at_xdmac_memset_create_desc()
1153 * match the one of another channel. If not, it could lead to spurious in at_xdmac_memset_create_desc()
1174 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_memset_create_desc()
1176 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_memset_create_desc()
1186 desc->lld.mbr_da = dst_addr; in at_xdmac_memset_create_desc()
1187 desc->lld.mbr_ds = value; in at_xdmac_memset_create_desc()
1188 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3 in at_xdmac_memset_create_desc()
1192 desc->lld.mbr_cfg = chan_cc; in at_xdmac_memset_create_desc()
1196 __func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc, in at_xdmac_memset_create_desc()
1197 desc->lld.mbr_cfg); in at_xdmac_memset_create_desc()
1216 list_add_tail(&desc->desc_node, &desc->descs_list); in at_xdmac_prep_dma_memset()
1218 desc->tx_dma_desc.cookie = -EBUSY; in at_xdmac_prep_dma_memset()
1219 desc->tx_dma_desc.flags = flags; in at_xdmac_prep_dma_memset()
1220 desc->xfer_size = len; in at_xdmac_prep_dma_memset()
1222 return &desc->tx_dma_desc; in at_xdmac_prep_dma_memset()
1253 list_splice_init(&first->descs_list, in at_xdmac_prep_dma_memset_sg()
1254 &atchan->free_descs_list); in at_xdmac_prep_dma_memset_sg()
1262 stride = sg_dma_address(sg) - in at_xdmac_prep_dma_memset_sg()
1274 * +-------+ +-------+ +-------+ in at_xdmac_prep_dma_memset_sg()
1275 * | N-2 | | N-1 | | N | in at_xdmac_prep_dma_memset_sg()
1276 * +-------+ +-------+ +-------+ in at_xdmac_prep_dma_memset_sg()
1278 * We need all these three elements (N-2, N-1 and N) in at_xdmac_prep_dma_memset_sg()
1280 * queue N-1 or reuse N-2. in at_xdmac_prep_dma_memset_sg()
1293 * N-2 descriptor in at_xdmac_prep_dma_memset_sg()
1296 ppdesc->lld.mbr_dus = stride; in at_xdmac_prep_dma_memset_sg()
1299 * Put back the N-1 descriptor in the in at_xdmac_prep_dma_memset_sg()
1302 list_add_tail(&pdesc->desc_node, in at_xdmac_prep_dma_memset_sg()
1303 &atchan->free_descs_list); in at_xdmac_prep_dma_memset_sg()
1306 * Make our N-1 descriptor pointer in at_xdmac_prep_dma_memset_sg()
1307 * point to the N-2 since they were in at_xdmac_prep_dma_memset_sg()
1323 * Queue the N-1 descriptor after the in at_xdmac_prep_dma_memset_sg()
1324 * N-2 in at_xdmac_prep_dma_memset_sg()
1329 * Add the N-1 descriptor to the list in at_xdmac_prep_dma_memset_sg()
1333 list_add_tail(&desc->desc_node, in at_xdmac_prep_dma_memset_sg()
1334 &first->descs_list); in at_xdmac_prep_dma_memset_sg()
1348 if ((i == (sg_len - 1)) && in at_xdmac_prep_dma_memset_sg()
1355 * Increment the block count of the N-1 in at_xdmac_prep_dma_memset_sg()
1359 pdesc->lld.mbr_dus = stride; in at_xdmac_prep_dma_memset_sg()
1365 list_add_tail(&desc->desc_node, in at_xdmac_prep_dma_memset_sg()
1366 &atchan->free_descs_list); in at_xdmac_prep_dma_memset_sg()
1380 first->tx_dma_desc.cookie = -EBUSY; in at_xdmac_prep_dma_memset_sg()
1381 first->tx_dma_desc.flags = flags; in at_xdmac_prep_dma_memset_sg()
1382 first->xfer_size = len; in at_xdmac_prep_dma_memset_sg()
1384 return &first->tx_dma_desc; in at_xdmac_prep_dma_memset_sg()
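The three-descriptor window above (N-2, N-1, N) exists to coalesce evenly spaced scatterlist entries: when the stride from N-2 to N-1 equals the stride from N-1 to N, descriptor N-1 is recycled and N-2's block count is bumped instead, with mbr_dus carrying the stride. A numeric illustration (addresses are made up):

    /* sg[0] = 0x1000, sg[1] = 0x1400, sg[2] = 0x1800: both strides are
     * 0x400, so the middle descriptor goes back to the free list,
     * sg[0]'s descriptor gets mbr_bc++ and mbr_dus = 0x400, and the
     * controller replays the same memset block at 0x400 intervals. */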
1392 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_tx_status()
1397 u32 cur_nda, check_nda, cur_ubc, mask, value; in at_xdmac_tx_status() local
1409 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_tx_status()
1411 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); in at_xdmac_tx_status()
1417 if (!desc->active_xfer) { in at_xdmac_tx_status()
1418 dma_set_residue(txstate, desc->xfer_size); in at_xdmac_tx_status()
1422 residue = desc->xfer_size; in at_xdmac_tx_status()
1430 * timeout, it requests the residue. If the data are in the DMA FIFO, in at_xdmac_tx_status()
1436 mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC; in at_xdmac_tx_status()
1438 if ((desc->lld.mbr_cfg & mask) == value) { in at_xdmac_tx_status()
1439 at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask); in at_xdmac_tx_status()
1445 * The easiest way to compute the residue should be to pause the DMA in at_xdmac_tx_status()
1449 * - DMA is running therefore a descriptor change is possible while in at_xdmac_tx_status()
1451 * - When the block transfer is done, the value of the CUBC register in at_xdmac_tx_status()
1456 * INITD -------- ------------ in at_xdmac_tx_status()
1496 if ((desc->lld.mbr_cfg & mask) == value) { in at_xdmac_tx_status()
1497 at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask); in at_xdmac_tx_status()
1507 descs_list = &desc->descs_list; in at_xdmac_tx_status()
1509 dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg); in at_xdmac_tx_status()
1510 residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth; in at_xdmac_tx_status()
1511 if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda) in at_xdmac_tx_status()
1520 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue); in at_xdmac_tx_status()
1523 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_tx_status()
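Stripped of the GSWF flush and the INITD/retry handling, the residue computation walks the descriptor chain, subtracting every microblock the hardware has already passed until it reaches the descriptor CNDA currently points past, then adds back what CUBC says is still pending. A simplified sketch, assuming cur_nda and cur_ubc were sampled consistently (which the retry loop above guarantees):

    residue = first->xfer_size;
    list_for_each_entry(desc, &first->descs_list, desc_node) {
        dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
        residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth; /* consumed */
        if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)     /* current desc */
            break;
    }
    residue += cur_ubc << dwidth; /* remainder of the current microblock */
    dma_set_residue(txstate, residue);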
1531 dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); in at_xdmac_remove_xfer()
1537 list_del(&desc->xfer_node); in at_xdmac_remove_xfer()
1538 list_splice_init(&desc->descs_list, &atchan->free_descs_list); in at_xdmac_remove_xfer()
1546 * If channel is enabled, do nothing, advance_work will be triggered in at_xdmac_advance_work()
1549 if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) { in at_xdmac_advance_work()
1550 desc = list_first_entry(&atchan->xfers_list, in at_xdmac_advance_work()
1553 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); in at_xdmac_advance_work()
1554 if (!desc->active_xfer) in at_xdmac_advance_work()
1564 spin_lock_irq(&atchan->lock); in at_xdmac_handle_cyclic()
1565 if (list_empty(&atchan->xfers_list)) { in at_xdmac_handle_cyclic()
1566 spin_unlock_irq(&atchan->lock); in at_xdmac_handle_cyclic()
1569 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, in at_xdmac_handle_cyclic()
1571 spin_unlock_irq(&atchan->lock); in at_xdmac_handle_cyclic()
1572 txd = &desc->tx_dma_desc; in at_xdmac_handle_cyclic()
1573 if (txd->flags & DMA_PREP_INTERRUPT) in at_xdmac_handle_cyclic()
1579 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_handle_error()
1588 if (atchan->irq_status & AT_XDMAC_CIS_RBEIS) in at_xdmac_handle_error()
1589 dev_err(chan2dev(&atchan->chan), "read bus error!!!"); in at_xdmac_handle_error()
1590 if (atchan->irq_status & AT_XDMAC_CIS_WBEIS) in at_xdmac_handle_error()
1591 dev_err(chan2dev(&atchan->chan), "write bus error!!!"); in at_xdmac_handle_error()
1592 if (atchan->irq_status & AT_XDMAC_CIS_ROIS) in at_xdmac_handle_error()
1593 dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); in at_xdmac_handle_error()
1595 spin_lock_irq(&atchan->lock); in at_xdmac_handle_error()
1597 /* Channel must be disabled first as it's not done automatically */ in at_xdmac_handle_error()
1598 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); in at_xdmac_handle_error()
1599 while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask) in at_xdmac_handle_error()
1602 bad_desc = list_first_entry(&atchan->xfers_list, in at_xdmac_handle_error()
1606 spin_unlock_irq(&atchan->lock); in at_xdmac_handle_error()
1609 dev_dbg(chan2dev(&atchan->chan), in at_xdmac_handle_error()
1611 __func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da, in at_xdmac_handle_error()
1612 bad_desc->lld.mbr_ubc); in at_xdmac_handle_error()
1623 dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n", in at_xdmac_tasklet()
1624 __func__, atchan->irq_status); in at_xdmac_tasklet()
1632 } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS) in at_xdmac_tasklet()
1633 || (atchan->irq_status & error_mask)) { in at_xdmac_tasklet()
1636 if (atchan->irq_status & error_mask) in at_xdmac_tasklet()
1639 spin_lock_irq(&atchan->lock); in at_xdmac_tasklet()
1640 desc = list_first_entry(&atchan->xfers_list, in at_xdmac_tasklet()
1643 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); in at_xdmac_tasklet()
1644 if (!desc->active_xfer) { in at_xdmac_tasklet()
1645 dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting"); in at_xdmac_tasklet()
1646 spin_unlock_irq(&atchan->lock); in at_xdmac_tasklet()
1650 txd = &desc->tx_dma_desc; in at_xdmac_tasklet()
1653 spin_unlock_irq(&atchan->lock); in at_xdmac_tasklet()
1656 if (txd->flags & DMA_PREP_INTERRUPT) in at_xdmac_tasklet()
1661 spin_lock_irq(&atchan->lock); in at_xdmac_tasklet()
1663 spin_unlock_irq(&atchan->lock); in at_xdmac_tasklet()
1680 dev_vdbg(atxdmac->dma.dev, in at_xdmac_interrupt()
1687 /* We have to find which channel has generated the interrupt. */ in at_xdmac_interrupt()
1688 for (i = 0; i < atxdmac->dma.chancnt; i++) { in at_xdmac_interrupt()
1692 atchan = &atxdmac->chan[i]; in at_xdmac_interrupt()
1695 atchan->irq_status = chan_status & chan_imr; in at_xdmac_interrupt()
1696 dev_vdbg(atxdmac->dma.dev, in at_xdmac_interrupt()
1699 dev_vdbg(chan2dev(&atchan->chan), in at_xdmac_interrupt()
1709 if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) in at_xdmac_interrupt()
1710 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); in at_xdmac_interrupt()
1712 tasklet_schedule(&atchan->tasklet); in at_xdmac_interrupt()
1726 dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__); in at_xdmac_issue_pending()
1728 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_issue_pending()
1730 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_issue_pending()
1744 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_device_config()
1746 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_device_config()
1754 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_device_pause()
1759 if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status)) in at_xdmac_device_pause()
1762 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_device_pause()
1763 at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask); in at_xdmac_device_pause()
1767 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_device_pause()
1775 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_device_resume()
1780 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_device_resume()
1782 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_device_resume()
1786 at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask); in at_xdmac_device_resume()
1787 clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); in at_xdmac_device_resume()
1788 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_device_resume()
1797 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_device_terminate_all()
1802 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_device_terminate_all()
1803 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); in at_xdmac_device_terminate_all()
1804 while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask) in at_xdmac_device_terminate_all()
1808 list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) in at_xdmac_device_terminate_all()
1811 clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); in at_xdmac_device_terminate_all()
1812 clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); in at_xdmac_device_terminate_all()
1813 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_device_terminate_all()
1826 "can't allocate channel resources (channel enabled)\n"); in at_xdmac_alloc_chan_resources()
1827 return -EIO; in at_xdmac_alloc_chan_resources()
1830 if (!list_empty(&atchan->free_descs_list)) { in at_xdmac_alloc_chan_resources()
1832 "can't allocate channel resources (channel not free from a previous use)\n"); in at_xdmac_alloc_chan_resources()
1833 return -EIO; in at_xdmac_alloc_chan_resources()
1843 list_add_tail(&desc->desc_node, &atchan->free_descs_list); in at_xdmac_alloc_chan_resources()
1856 struct at_xdmac *atxdmac = to_at_xdmac(chan->device); in at_xdmac_free_chan_resources()
1859 list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) { in at_xdmac_free_chan_resources()
1861 list_del(&desc->desc_node); in at_xdmac_free_chan_resources()
1862 dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys); in at_xdmac_free_chan_resources()
1874 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { in atmel_xdmac_prepare()
1879 return -EAGAIN; in atmel_xdmac_prepare()
1893 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { in atmel_xdmac_suspend()
1896 atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC); in atmel_xdmac_suspend()
1900 atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); in atmel_xdmac_suspend()
1901 atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA); in atmel_xdmac_suspend()
1902 atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC); in atmel_xdmac_suspend()
1905 atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM); in atmel_xdmac_suspend()
1908 clk_disable_unprepare(atxdmac->clk); in atmel_xdmac_suspend()
1920 ret = clk_prepare_enable(atxdmac->clk); in atmel_xdmac_resume()
1925 for (i = 0; i < atxdmac->dma.chancnt; i++) { in atmel_xdmac_resume()
1926 atchan = &atxdmac->chan[i]; in atmel_xdmac_resume()
1931 at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim); in atmel_xdmac_resume()
1932 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { in atmel_xdmac_resume()
1934 at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc); in atmel_xdmac_resume()
1938 at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda); in atmel_xdmac_resume()
1939 at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc); in atmel_xdmac_resume()
1940 at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim); in atmel_xdmac_resume()
1942 at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask); in atmel_xdmac_resume()
1972 dev_err(&pdev->dev, "invalid number of channels (%u)\n", in at_xdmac_probe()
1974 return -EINVAL; in at_xdmac_probe()
1979 atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); in at_xdmac_probe()
1981 dev_err(&pdev->dev, "can't allocate at_xdmac structure\n"); in at_xdmac_probe()
1982 return -ENOMEM; in at_xdmac_probe()
1985 atxdmac->regs = base; in at_xdmac_probe()
1986 atxdmac->irq = irq; in at_xdmac_probe()
1988 atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk"); in at_xdmac_probe()
1989 if (IS_ERR(atxdmac->clk)) { in at_xdmac_probe()
1990 dev_err(&pdev->dev, "can't get dma_clk\n"); in at_xdmac_probe()
1991 return PTR_ERR(atxdmac->clk); in at_xdmac_probe()
1995 ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac); in at_xdmac_probe()
1997 dev_err(&pdev->dev, "can't request irq\n"); in at_xdmac_probe()
2001 ret = clk_prepare_enable(atxdmac->clk); in at_xdmac_probe()
2003 dev_err(&pdev->dev, "can't prepare or enable clock\n"); in at_xdmac_probe()
2007 atxdmac->at_xdmac_desc_pool = in at_xdmac_probe()
2008 dmam_pool_create(dev_name(&pdev->dev), &pdev->dev, in at_xdmac_probe()
2010 if (!atxdmac->at_xdmac_desc_pool) { in at_xdmac_probe()
2011 dev_err(&pdev->dev, "no memory for descriptors dma pool\n"); in at_xdmac_probe()
2012 ret = -ENOMEM; in at_xdmac_probe()
2016 dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask); in at_xdmac_probe()
2017 dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask); in at_xdmac_probe()
2018 dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask); in at_xdmac_probe()
2019 dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask); in at_xdmac_probe()
2020 dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask); in at_xdmac_probe()
2021 dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask); in at_xdmac_probe()
2024 * one channel, second allocation fails in private_candidate. in at_xdmac_probe()
2026 dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask); in at_xdmac_probe()
2027 atxdmac->dma.dev = &pdev->dev; in at_xdmac_probe()
2028 atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources; in at_xdmac_probe()
2029 atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources; in at_xdmac_probe()
2030 atxdmac->dma.device_tx_status = at_xdmac_tx_status; in at_xdmac_probe()
2031 atxdmac->dma.device_issue_pending = at_xdmac_issue_pending; in at_xdmac_probe()
2032 atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic; in at_xdmac_probe()
2033 atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved; in at_xdmac_probe()
2034 atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy; in at_xdmac_probe()
2035 atxdmac->dma.device_prep_dma_memset = at_xdmac_prep_dma_memset; in at_xdmac_probe()
2036 atxdmac->dma.device_prep_dma_memset_sg = at_xdmac_prep_dma_memset_sg; in at_xdmac_probe()
2037 atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg; in at_xdmac_probe()
2038 atxdmac->dma.device_config = at_xdmac_device_config; in at_xdmac_probe()
2039 atxdmac->dma.device_pause = at_xdmac_device_pause; in at_xdmac_probe()
2040 atxdmac->dma.device_resume = at_xdmac_device_resume; in at_xdmac_probe()
2041 atxdmac->dma.device_terminate_all = at_xdmac_device_terminate_all; in at_xdmac_probe()
2042 atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS; in at_xdmac_probe()
2043 atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS; in at_xdmac_probe()
2044 atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); in at_xdmac_probe()
2045 atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; in at_xdmac_probe()
2051 INIT_LIST_HEAD(&atxdmac->dma.channels); in at_xdmac_probe()
2053 struct at_xdmac_chan *atchan = &atxdmac->chan[i]; in at_xdmac_probe()
2055 atchan->chan.device = &atxdmac->dma; in at_xdmac_probe()
2056 list_add_tail(&atchan->chan.device_node, in at_xdmac_probe()
2057 &atxdmac->dma.channels); in at_xdmac_probe()
2059 atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i); in at_xdmac_probe()
2060 atchan->mask = 1 << i; in at_xdmac_probe()
2062 spin_lock_init(&atchan->lock); in at_xdmac_probe()
2063 INIT_LIST_HEAD(&atchan->xfers_list); in at_xdmac_probe()
2064 INIT_LIST_HEAD(&atchan->free_descs_list); in at_xdmac_probe()
2065 tasklet_setup(&atchan->tasklet, at_xdmac_tasklet); in at_xdmac_probe()
2073 ret = dma_async_device_register(&atxdmac->dma); in at_xdmac_probe()
2075 dev_err(&pdev->dev, "fail to register DMA engine device\n"); in at_xdmac_probe()
2079 ret = of_dma_controller_register(pdev->dev.of_node, in at_xdmac_probe()
2082 dev_err(&pdev->dev, "could not register of dma controller\n"); in at_xdmac_probe()
2086 dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n", in at_xdmac_probe()
2087 nr_channels, atxdmac->regs); in at_xdmac_probe()
2092 dma_async_device_unregister(&atxdmac->dma); in at_xdmac_probe()
2094 clk_disable_unprepare(atxdmac->clk); in at_xdmac_probe()
2096 free_irq(atxdmac->irq, atxdmac); in at_xdmac_probe()
2106 of_dma_controller_free(pdev->dev.of_node); in at_xdmac_remove()
2107 dma_async_device_unregister(&atxdmac->dma); in at_xdmac_remove()
2108 clk_disable_unprepare(atxdmac->clk); in at_xdmac_remove()
2110 free_irq(atxdmac->irq, atxdmac); in at_xdmac_remove()
2112 for (i = 0; i < atxdmac->dma.chancnt; i++) { in at_xdmac_remove()
2113 struct at_xdmac_chan *atchan = &atxdmac->chan[i]; in at_xdmac_remove()
2115 tasklet_kill(&atchan->tasklet); in at_xdmac_remove()
2116 at_xdmac_free_chan_resources(&atchan->chan); in at_xdmac_remove()
2129 .compatible = "atmel,sama5d4-dma",
2152 MODULE_DESCRIPTION("Atmel Extensible DMA Controller driver");