Lines Matching +full:dma +full:- +full:channel +full:- +full:mask
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
11 #include <dt-bindings/dma/at91.h>
36 #define AT_XDMAC_GIM 0x14 /* Global Interrupt Mask Register */
38 #define AT_XDMAC_GE 0x1C /* Global Channel Enable Register */
39 #define AT_XDMAC_GD 0x20 /* Global Channel Disable Register */
40 #define AT_XDMAC_GS 0x24 /* Global Channel Status Register */
41 #define AT_XDMAC_GRS 0x28 /* Global Channel Read Suspend Register */
43 #define AT_XDMAC_GRWS 0x30 /* Global Channel Read Write Suspend Register */
44 #define AT_XDMAC_GRWR 0x34 /* Global Channel Read Write Resume Register */
45 #define AT_XDMAC_GSWR 0x38 /* Global Channel Software Request Register */
46 #define AT_XDMAC_GSWS 0x3C /* Global Channel Software Request Status Register */
47 #define AT_XDMAC_GSWF 0x40 /* Global Channel Software Flush Request Register */
50 /* Channel relative registers offsets */
51 #define AT_XDMAC_CIE 0x00 /* Channel Interrupt Enable Register */
59 #define AT_XDMAC_CID 0x04 /* Channel Interrupt Disable Register */
67 #define AT_XDMAC_CIM 0x08 /* Channel Interrupt Mask Register */
68 #define AT_XDMAC_CIM_BIM BIT(0) /* End of Block Interrupt Mask Bit */
69 #define AT_XDMAC_CIM_LIM BIT(1) /* End of Linked List Interrupt Mask Bit */
70 #define AT_XDMAC_CIM_DIM BIT(2) /* End of Disable Interrupt Mask Bit */
71 #define AT_XDMAC_CIM_FIM BIT(3) /* End of Flush Interrupt Mask Bit */
72 #define AT_XDMAC_CIM_RBEIM BIT(4) /* Read Bus Error Interrupt Mask Bit */
73 #define AT_XDMAC_CIM_WBEIM BIT(5) /* Write Bus Error Interrupt Mask Bit */
74 #define AT_XDMAC_CIM_ROIM BIT(6) /* Request Overflow Interrupt Mask Bit */
75 #define AT_XDMAC_CIS 0x0C /* Channel Interrupt Status Register */
83 #define AT_XDMAC_CSA 0x10 /* Channel Source Address Register */
84 #define AT_XDMAC_CDA 0x14 /* Channel Destination Address Register */
85 #define AT_XDMAC_CNDA 0x18 /* Channel Next Descriptor Address Register */
86 #define AT_XDMAC_CNDA_NDAIF(i) ((i) & 0x1) /* Channel x Next Descriptor Interface */
87 #define AT_XDMAC_CNDA_NDA(i) ((i) & 0xfffffffc) /* Channel x Next Descriptor Address */
88 #define AT_XDMAC_CNDC 0x1C /* Channel Next Descriptor Control Register */
89 #define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */
90 #define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */
91 #define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */
93 #define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */
94 #define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */
95 #define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */
96 #define AT_XDMAC_CNDC_NDVIEW_NDV3 (0x3 << 3) /* Channel x Next Descriptor View 3 */
97 #define AT_XDMAC_CUBC 0x20 /* Channel Microblock Control Register */
98 #define AT_XDMAC_CBC 0x24 /* Channel Block Control Register */
99 #define AT_XDMAC_CC 0x28 /* Channel Configuration Register */
100 #define AT_XDMAC_CC_TYPE (0x1 << 0) /* Channel Transfer Type */
108 #define AT_XDMAC_CC_DSYNC (0x1 << 4) /* Channel Synchronization */
111 #define AT_XDMAC_CC_PROT (0x1 << 5) /* Channel Protection */
114 #define AT_XDMAC_CC_SWREQ (0x1 << 6) /* Channel Software Request Trigger */
117 #define AT_XDMAC_CC_MEMSET (0x1 << 7) /* Channel Fill Block of Memory */
120 #define AT_XDMAC_CC_CSIZE(i) ((0x7 & (i)) << 8) /* Channel Chunk Size */
123 #define AT_XDMAC_CC_DWIDTH(i) ((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET) /* Channel Data Width */
128 #define AT_XDMAC_CC_SIF(i) ((0x1 & (i)) << 13) /* Channel Source Interface Identifier */
129 #define AT_XDMAC_CC_DIF(i) ((0x1 & (i)) << 14) /* Channel Destination Interface Identifier */
130 #define AT_XDMAC_CC_SAM_MASK (0x3 << 16) /* Channel Source Addressing Mode */
135 #define AT_XDMAC_CC_DAM_MASK (0x3 << 18) /* Channel Destination Addressing Mode */
140 #define AT_XDMAC_CC_INITD (0x1 << 21) /* Channel Initialization Terminated (read only) */
149 #define AT_XDMAC_CC_PERID(i) ((0x7f & (i)) << 24) /* Channel Peripheral Identifier */
150 #define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */
151 #define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */
152 #define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */
154 #define AT_XDMAC_CHAN_REG_BASE 0x50 /* Channel registers base address */
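Worked example of the layout above: the channel register windows start right after the global registers, so a channel-relative offset is base plus stride plus register. A minimal user-space sketch; the 0x40 stride (and the AT_XDMAC_CHAN_REG_SIZE name for it) are assumptions mirroring at_xdmac_chan_reg_base() below.

#include <stdint.h>
#include <stdio.h>

#define AT_XDMAC_CHAN_REG_BASE	0x50	/* first channel window */
#define AT_XDMAC_CHAN_REG_SIZE	0x40	/* assumed per-channel stride */
#define AT_XDMAC_CC		0x28	/* Channel Configuration Register */

/* Mirrors at_xdmac_chan_reg_base(): regs + 0x50 + chan_nb * 0x40. */
static uint32_t chan_reg_offset(unsigned int chan_nb, uint32_t reg)
{
	return AT_XDMAC_CHAN_REG_BASE + chan_nb * AT_XDMAC_CHAN_REG_SIZE + reg;
}

int main(void)
{
	/* Channel 2's CC register: 0x50 + 2 * 0x40 + 0x28 = 0xf8. */
	printf("chan 2 CC at offset 0x%x\n",
	       (unsigned int)chan_reg_offset(2, AT_XDMAC_CC));
	return 0;
}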
183 /* ----- Channels ----- */
187 u32 mask; /* Channel Mask */ member
188 u32 cfg; /* Channel Configuration Register */
208 /* ----- Controller ----- */
210 struct dma_device dma; member
221 /* ----- Descriptors ----- */
236 /* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
251 return atxdmac->regs + (AT_XDMAC_CHAN_REG_BASE + chan_nb * 0x40); in at_xdmac_chan_reg_base()
254 #define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
256 writel_relaxed((value), (atxdmac)->regs + (reg))
258 #define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
259 #define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))
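The controller masks interrupts at two levels: per channel (CIE/CID/CIM) and globally, one bit per channel (GIE/GID/GIM). A hedged kernel-context sketch using the accessors above; the helper name is hypothetical, and AT_XDMAC_CIE_LIE is assumed to sit among the CIE bits elided from this listing, paired with AT_XDMAC_CIM_LIM.

/* Hypothetical helper: arm the end-of-linked-list interrupt for one
 * channel, then unmask that channel in the global interrupt mask. */
static void at_chan_arm_lli_irq(struct at_xdmac *atxdmac,
				struct at_xdmac_chan *atchan)
{
	at_xdmac_chan_write(atchan, AT_XDMAC_CIE, AT_XDMAC_CIE_LIE);
	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
}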
268 return &chan->dev->device; in chan2dev()
273 return container_of(ddev, struct at_xdmac, dma); in to_at_xdmac()
283 return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); in at_xdmac_chan_is_cyclic()
288 return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); in at_xdmac_chan_is_paused()
295 csize = ffs(maxburst) - 1; in at_xdmac_csize()
297 csize = -EINVAL; in at_xdmac_csize()
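The CSIZE field encodes the chunk length as a power of two, which is why the driver derives it with ffs(). A self-contained sketch of that mapping; the cutoff at 4 (16-transfer chunks) mirrors what at_xdmac_csize() is assumed to enforce, and maxburst is assumed to be a power of two as dmaengine clients provide.

#include <errno.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

/* A burst of 1, 2, 4, 8 or 16 transfers becomes CSIZE 0..4 (log2);
 * anything larger has no hardware encoding and is rejected. */
static int csize_from_maxburst(unsigned int maxburst)
{
	int csize = ffs(maxburst) - 1;

	if (csize < 0 || csize > 4)
		return -EINVAL;
	return csize;
}

int main(void)
{
	printf("maxburst 8  -> CSIZE %d\n", csize_from_maxburst(8));
	printf("maxburst 32 -> CSIZE %d\n", csize_from_maxburst(32));
	return 0;
}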
315 "initial descriptors per channel (default: 64)");
320 return at_xdmac_read(to_at_xdmac(atchan->chan.device), AT_XDMAC_GS) & atchan->mask; in at_xdmac_chan_is_enabled()
325 at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L); in at_xdmac_off()
331 at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L); in at_xdmac_off()
338 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_start_xfer()
341 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first); in at_xdmac_start_xfer()
344 first->active_xfer = true; in at_xdmac_start_xfer()
347 reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys) in at_xdmac_start_xfer()
348 | AT_XDMAC_CNDA_NDAIF(atchan->memif); in at_xdmac_start_xfer()
358 else if ((first->lld.mbr_ubc & in at_xdmac_start_xfer()
366 * properly. This bit can be modified only by using the channel in at_xdmac_start_xfer()
369 at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg); in at_xdmac_start_xfer()
376 dev_vdbg(chan2dev(&atchan->chan), in at_xdmac_start_xfer()
390 if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg)) in at_xdmac_start_xfer()
394 * There is no end of list when doing cyclic DMA, so we need to get in at_xdmac_start_xfer()
403 at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask); in at_xdmac_start_xfer()
404 dev_vdbg(chan2dev(&atchan->chan), in at_xdmac_start_xfer()
405 "%s: enable channel (0x%08x)\n", __func__, atchan->mask); in at_xdmac_start_xfer()
407 at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask); in at_xdmac_start_xfer()
409 dev_vdbg(chan2dev(&atchan->chan), in at_xdmac_start_xfer()
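Worked example of the CNDA value assembled above: the descriptor's physical address occupies bits 31:2 and the bus interface used to fetch it occupies bit 0. The address and interface values below are hypothetical.

#include <stdio.h>

#define AT_XDMAC_CNDA_NDAIF(i)	((i) & 0x1)
#define AT_XDMAC_CNDA_NDA(i)	((i) & 0xfffffffc)

int main(void)
{
	unsigned int desc_phys = 0x2f001230;	/* hypothetical descriptor address */
	unsigned int memif = 1;			/* hypothetical memory interface */
	unsigned int cnda = AT_XDMAC_CNDA_NDA(desc_phys) | AT_XDMAC_CNDA_NDAIF(memif);

	printf("CNDA = 0x%08x\n", cnda);	/* prints 0x2f001231 */
	return 0;
}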
423 struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan); in at_xdmac_tx_submit()
427 spin_lock_irqsave(&atchan->lock, irqflags); in at_xdmac_tx_submit()
430 list_add_tail(&desc->xfer_node, &atchan->xfers_list); in at_xdmac_tx_submit()
431 spin_unlock_irqrestore(&atchan->lock, irqflags); in at_xdmac_tx_submit()
433 dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n", in at_xdmac_tx_submit()
443 struct at_xdmac *atxdmac = to_at_xdmac(chan->device); in at_xdmac_alloc_desc()
446 desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys); in at_xdmac_alloc_desc()
448 INIT_LIST_HEAD(&desc->descs_list); in at_xdmac_alloc_desc()
449 dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan); in at_xdmac_alloc_desc()
450 desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit; in at_xdmac_alloc_desc()
451 desc->tx_dma_desc.phys = phys; in at_xdmac_alloc_desc()
459 memset(&desc->lld, 0, sizeof(desc->lld)); in at_xdmac_init_used_desc()
460 INIT_LIST_HEAD(&desc->descs_list); in at_xdmac_init_used_desc()
461 desc->direction = DMA_TRANS_NONE; in at_xdmac_init_used_desc()
462 desc->xfer_size = 0; in at_xdmac_init_used_desc()
463 desc->active_xfer = false; in at_xdmac_init_used_desc()
471 if (list_empty(&atchan->free_descs_list)) { in at_xdmac_get_desc()
472 desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT); in at_xdmac_get_desc()
474 desc = list_first_entry(&atchan->free_descs_list, in at_xdmac_get_desc()
476 list_del(&desc->desc_node); in at_xdmac_get_desc()
490 prev->lld.mbr_nda = desc->tx_dma_desc.phys; in at_xdmac_queue_desc()
491 prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE; in at_xdmac_queue_desc()
494 __func__, prev, &prev->lld.mbr_nda); in at_xdmac_queue_desc()
503 desc->lld.mbr_bc++; in at_xdmac_increment_block_count()
513 struct at_xdmac *atxdmac = of_dma->of_dma_data; in at_xdmac_xlate()
516 struct device *dev = atxdmac->dma.dev; in at_xdmac_xlate()
518 if (dma_spec->args_count != 1) { in at_xdmac_xlate()
519 dev_err(dev, "dma phandle args: bad number of args\n"); in at_xdmac_xlate()
523 chan = dma_get_any_slave_channel(&atxdmac->dma); in at_xdmac_xlate()
525 dev_err(dev, "can't get a dma channel\n"); in at_xdmac_xlate()
530 atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]); in at_xdmac_xlate()
531 atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]); in at_xdmac_xlate()
532 atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]); in at_xdmac_xlate()
534 atchan->memif, atchan->perif, atchan->perid); in at_xdmac_xlate()
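The single dma-spec cell decoded above packs the peripheral id and both bus interfaces into one u32. A self-contained sketch with local stand-ins for the dt-bindings/dma/at91.h getters; the bit positions (13, 14 and 24, matching the CC SIF/DIF/PERID fields) are stated here as assumptions.

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the AT91_XDMAC_DT_GET_* macros. */
#define DT_GET_MEM_IF(cfg)	(((cfg) >> 13) & 0x1)
#define DT_GET_PER_IF(cfg)	(((cfg) >> 14) & 0x1)
#define DT_GET_PERID(cfg)	(((cfg) >> 24) & 0x7f)

int main(void)
{
	/* Hypothetical cell: peripheral id 37, per_if 1, mem_if 0. */
	uint32_t cell = (37u << 24) | (1u << 14);

	printf("memif=%u perif=%u perid=%u\n",
	       (unsigned int)DT_GET_MEM_IF(cell),
	       (unsigned int)DT_GET_PER_IF(cell),
	       (unsigned int)DT_GET_PERID(cell));
	return 0;
}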
546 atchan->cfg = in at_xdmac_compute_chan_conf()
547 AT91_XDMAC_DT_PERID(atchan->perid) in at_xdmac_compute_chan_conf()
550 | AT_XDMAC_CC_DIF(atchan->memif) in at_xdmac_compute_chan_conf()
551 | AT_XDMAC_CC_SIF(atchan->perif) in at_xdmac_compute_chan_conf()
556 csize = ffs(atchan->sconfig.src_maxburst) - 1; in at_xdmac_compute_chan_conf()
559 return -EINVAL; in at_xdmac_compute_chan_conf()
561 atchan->cfg |= AT_XDMAC_CC_CSIZE(csize); in at_xdmac_compute_chan_conf()
562 dwidth = ffs(atchan->sconfig.src_addr_width) - 1; in at_xdmac_compute_chan_conf()
565 return -EINVAL; in at_xdmac_compute_chan_conf()
567 atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth); in at_xdmac_compute_chan_conf()
569 atchan->cfg = in at_xdmac_compute_chan_conf()
570 AT91_XDMAC_DT_PERID(atchan->perid) in at_xdmac_compute_chan_conf()
573 | AT_XDMAC_CC_DIF(atchan->perif) in at_xdmac_compute_chan_conf()
574 | AT_XDMAC_CC_SIF(atchan->memif) in at_xdmac_compute_chan_conf()
579 csize = ffs(atchan->sconfig.dst_maxburst) - 1; in at_xdmac_compute_chan_conf()
582 return -EINVAL; in at_xdmac_compute_chan_conf()
584 atchan->cfg |= AT_XDMAC_CC_CSIZE(csize); in at_xdmac_compute_chan_conf()
585 dwidth = ffs(atchan->sconfig.dst_addr_width) - 1; in at_xdmac_compute_chan_conf()
588 return -EINVAL; in at_xdmac_compute_chan_conf()
590 atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth); in at_xdmac_compute_chan_conf()
593 dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg); in at_xdmac_compute_chan_conf()
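A minimal model of the DMA_DEV_TO_MEM branch above: burst and width come from the slave config, interfaces and peripheral id from the dma-spec. The CC encoders are copied from the header block earlier in this file; AT_XDMAC_CC_TYPE_PER_TRAN and the DWIDTH offset of 11 are assumptions, and the values in main() are hypothetical.

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define AT_XDMAC_CC_TYPE_PER_TRAN	(0x1 << 0)	/* assumed encoding */
#define AT_XDMAC_CC_CSIZE(i)		((0x7 & (i)) << 8)
#define AT_XDMAC_CC_DWIDTH(i)		((0x3 & (i)) << 11)	/* assumed offset */
#define AT_XDMAC_CC_SIF(i)		((0x1 & (i)) << 13)
#define AT_XDMAC_CC_DIF(i)		((0x1 & (i)) << 14)
#define AT_XDMAC_CC_PERID(i)		((0x7f & (i)) << 24)

int main(void)
{
	unsigned int src_maxburst = 4, src_addr_width = 4;	/* bytes */
	unsigned int perid = 37, perif = 1, memif = 0;
	unsigned int cc;

	cc = AT_XDMAC_CC_PERID(perid)
	   | AT_XDMAC_CC_TYPE_PER_TRAN
	   | AT_XDMAC_CC_SIF(perif)	/* read from the peripheral bus */
	   | AT_XDMAC_CC_DIF(memif)	/* write to the memory bus */
	   | AT_XDMAC_CC_CSIZE(ffs(src_maxburst) - 1)
	   | AT_XDMAC_CC_DWIDTH(ffs(src_addr_width) - 1);

	printf("CC = 0x%08x\n", cc);	/* prints 0x25003201 */
	return 0;
}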
605 if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE) in at_xdmac_check_slave_config()
606 || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE)) in at_xdmac_check_slave_config()
607 return -EINVAL; in at_xdmac_check_slave_config()
609 if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH) in at_xdmac_check_slave_config()
610 || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH)) in at_xdmac_check_slave_config()
611 return -EINVAL; in at_xdmac_check_slave_config()
623 return -EINVAL; in at_xdmac_set_slave_config()
626 memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig)); in at_xdmac_set_slave_config()
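For context, a hedged client-side sketch of how a peripheral driver feeds this checker through the generic dmaengine wrapper; the function name, FIFO address and burst shape are hypothetical, and the values must stay within the AT_XDMAC_MAX_CSIZE/AT_XDMAC_MAX_DWIDTH limits tested above.

#include <linux/dmaengine.h>

static int client_dma_setup(struct dma_chan *chan, dma_addr_t fifo)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo,			/* peripheral FIFO */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 8,
	};

	/* Lands in at_xdmac_device_config() via chan->device->device_config. */
	return dmaengine_slave_config(chan, &cfg);
}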
648 dev_err(chan2dev(chan), "invalid DMA direction\n"); in at_xdmac_prep_slave_sg()
658 spin_lock_irqsave(&atchan->lock, irqflags); in at_xdmac_prep_slave_sg()
681 list_splice_tail_init(&first->descs_list, in at_xdmac_prep_slave_sg()
682 &atchan->free_descs_list); in at_xdmac_prep_slave_sg()
688 desc->lld.mbr_sa = atchan->sconfig.src_addr; in at_xdmac_prep_slave_sg()
689 desc->lld.mbr_da = mem; in at_xdmac_prep_slave_sg()
691 desc->lld.mbr_sa = mem; in at_xdmac_prep_slave_sg()
692 desc->lld.mbr_da = atchan->sconfig.dst_addr; in at_xdmac_prep_slave_sg()
694 dwidth = at_xdmac_get_dwidth(atchan->cfg); in at_xdmac_prep_slave_sg()
698 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */ in at_xdmac_prep_slave_sg()
702 desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) | in at_xdmac_prep_slave_sg()
706 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); in at_xdmac_prep_slave_sg()
718 list_add_tail(&desc->desc_node, &first->descs_list); in at_xdmac_prep_slave_sg()
723 first->tx_dma_desc.flags = flags; in at_xdmac_prep_slave_sg()
724 first->xfer_size = xfer_size; in at_xdmac_prep_slave_sg()
725 first->direction = direction; in at_xdmac_prep_slave_sg()
726 ret = &first->tx_dma_desc; in at_xdmac_prep_slave_sg()
729 spin_unlock_irqrestore(&atchan->lock, irqflags); in at_xdmac_prep_slave_sg()
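A hedged usage sketch of the path into at_xdmac_prep_slave_sg() and at_xdmac_tx_submit(): prepare the descriptor chain, submit it, then kick the engine. The caller is assumed to have DMA-mapped the scatterlist already.

#include <linux/dmaengine.h>
#include <linux/errno.h>

static int client_start_rx(struct dma_chan *chan,
			   struct scatterlist *sgl, unsigned int sg_len)
{
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				      DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	dmaengine_submit(txd);		/* reaches at_xdmac_tx_submit() */
	dma_async_issue_pending(chan);	/* reaches at_xdmac_issue_pending() */
	return 0;
}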
750 dev_err(chan2dev(chan), "invalid DMA direction\n"); in at_xdmac_prep_dma_cyclic()
754 if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) { in at_xdmac_prep_dma_cyclic()
755 dev_err(chan2dev(chan), "channel currently used\n"); in at_xdmac_prep_dma_cyclic()
765 spin_lock_irqsave(&atchan->lock, irqflags); in at_xdmac_prep_dma_cyclic()
770 list_splice_tail_init(&first->descs_list, in at_xdmac_prep_dma_cyclic()
771 &atchan->free_descs_list); in at_xdmac_prep_dma_cyclic()
772 spin_unlock_irqrestore(&atchan->lock, irqflags); in at_xdmac_prep_dma_cyclic()
775 spin_unlock_irqrestore(&atchan->lock, irqflags); in at_xdmac_prep_dma_cyclic()
778 __func__, desc, &desc->tx_dma_desc.phys); in at_xdmac_prep_dma_cyclic()
781 desc->lld.mbr_sa = atchan->sconfig.src_addr; in at_xdmac_prep_dma_cyclic()
782 desc->lld.mbr_da = buf_addr + i * period_len; in at_xdmac_prep_dma_cyclic()
784 desc->lld.mbr_sa = buf_addr + i * period_len; in at_xdmac_prep_dma_cyclic()
785 desc->lld.mbr_da = atchan->sconfig.dst_addr; in at_xdmac_prep_dma_cyclic()
787 desc->lld.mbr_cfg = atchan->cfg; in at_xdmac_prep_dma_cyclic()
788 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 in at_xdmac_prep_dma_cyclic()
791 | period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg); in at_xdmac_prep_dma_cyclic()
795 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); in at_xdmac_prep_dma_cyclic()
807 list_add_tail(&desc->desc_node, &first->descs_list); in at_xdmac_prep_dma_cyclic()
811 first->tx_dma_desc.flags = flags; in at_xdmac_prep_dma_cyclic()
812 first->xfer_size = buf_len; in at_xdmac_prep_dma_cyclic()
813 first->direction = direction; in at_xdmac_prep_dma_cyclic()
815 return &first->tx_dma_desc; in at_xdmac_prep_dma_cyclic()
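A hedged client-side sketch of requesting such a cyclic transfer: one ring buffer split into periods, each completed period raising the end-of-block interrupt that at_xdmac_handle_cyclic() turns into a callback. The four-period split is arbitrary.

#include <linux/dmaengine.h>

static struct dma_async_tx_descriptor *
client_start_ring(struct dma_chan *chan, dma_addr_t buf, size_t buf_len)
{
	return dmaengine_prep_dma_cyclic(chan, buf, buf_len,
					 buf_len / 4,	/* period_len */
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT);
}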
860 * WARNING: The channel configuration is set here since there is no in at_xdmac_interleaved_queue_desc()
868 * match the one of another channel. If not, it could lead to spurious in at_xdmac_interleaved_queue_desc()
877 dwidth = at_xdmac_align_width(chan, src | dst | chunk->size); in at_xdmac_interleaved_queue_desc()
878 if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) { in at_xdmac_interleaved_queue_desc()
881 __func__, chunk->size, in at_xdmac_interleaved_queue_desc()
890 if (xt->src_inc) { in at_xdmac_interleaved_queue_desc()
891 if (xt->src_sgl) in at_xdmac_interleaved_queue_desc()
897 if (xt->dst_inc) { in at_xdmac_interleaved_queue_desc()
898 if (xt->dst_sgl) in at_xdmac_interleaved_queue_desc()
904 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_interleaved_queue_desc()
906 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_interleaved_queue_desc()
914 ublen = chunk->size >> dwidth; in at_xdmac_interleaved_queue_desc()
916 desc->lld.mbr_sa = src; in at_xdmac_interleaved_queue_desc()
917 desc->lld.mbr_da = dst; in at_xdmac_interleaved_queue_desc()
918 desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk); in at_xdmac_interleaved_queue_desc()
919 desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk); in at_xdmac_interleaved_queue_desc()
921 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3 in at_xdmac_interleaved_queue_desc()
925 desc->lld.mbr_cfg = chan_cc; in at_xdmac_interleaved_queue_desc()
929 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, in at_xdmac_interleaved_queue_desc()
930 desc->lld.mbr_ubc, desc->lld.mbr_cfg); in at_xdmac_interleaved_queue_desc()
951 if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM)) in at_xdmac_prep_interleaved()
958 if ((xt->numf > 1) && (xt->frame_size > 1)) in at_xdmac_prep_interleaved()
962 __func__, &xt->src_start, &xt->dst_start, xt->numf, in at_xdmac_prep_interleaved()
963 xt->frame_size, flags); in at_xdmac_prep_interleaved()
965 src_addr = xt->src_start; in at_xdmac_prep_interleaved()
966 dst_addr = xt->dst_start; in at_xdmac_prep_interleaved()
968 if (xt->numf > 1) { in at_xdmac_prep_interleaved()
972 xt, xt->sgl); in at_xdmac_prep_interleaved()
977 for (i = 0; i < xt->numf - 1; i++) in at_xdmac_prep_interleaved()
982 list_add_tail(&first->desc_node, &first->descs_list); in at_xdmac_prep_interleaved()
984 for (i = 0; i < xt->frame_size; i++) { in at_xdmac_prep_interleaved()
988 chunk = xt->sgl + i; in at_xdmac_prep_interleaved()
993 src_skip = chunk->size + src_icg; in at_xdmac_prep_interleaved()
994 dst_skip = chunk->size + dst_icg; in at_xdmac_prep_interleaved()
998 __func__, chunk->size, src_icg, dst_icg); in at_xdmac_prep_interleaved()
1006 list_splice_tail_init(&first->descs_list, in at_xdmac_prep_interleaved()
1007 &atchan->free_descs_list); in at_xdmac_prep_interleaved()
1016 list_add_tail(&desc->desc_node, &first->descs_list); in at_xdmac_prep_interleaved()
1018 if (xt->src_sgl) in at_xdmac_prep_interleaved()
1021 if (xt->dst_sgl) in at_xdmac_prep_interleaved()
1024 len += chunk->size; in at_xdmac_prep_interleaved()
1029 first->tx_dma_desc.cookie = -EBUSY; in at_xdmac_prep_interleaved()
1030 first->tx_dma_desc.flags = flags; in at_xdmac_prep_interleaved()
1031 first->xfer_size = len; in at_xdmac_prep_interleaved()
1033 return &first->tx_dma_desc; in at_xdmac_prep_interleaved()
1053 * match the one of another channel. If not, it could lead to spurious in at_xdmac_prep_dma_memcpy()
1079 spin_lock_irqsave(&atchan->lock, irqflags); in at_xdmac_prep_dma_memcpy()
1081 spin_unlock_irqrestore(&atchan->lock, irqflags); in at_xdmac_prep_dma_memcpy()
1085 list_splice_tail_init(&first->descs_list, in at_xdmac_prep_dma_memcpy()
1086 &atchan->free_descs_list); in at_xdmac_prep_dma_memcpy()
1108 remaining_size -= xfer_size; in at_xdmac_prep_dma_memcpy()
1110 desc->lld.mbr_sa = src_addr; in at_xdmac_prep_dma_memcpy()
1111 desc->lld.mbr_da = dst_addr; in at_xdmac_prep_dma_memcpy()
1112 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 in at_xdmac_prep_dma_memcpy()
1116 desc->lld.mbr_cfg = chan_cc; in at_xdmac_prep_dma_memcpy()
1120 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg); in at_xdmac_prep_dma_memcpy()
1132 list_add_tail(&desc->desc_node, &first->descs_list); in at_xdmac_prep_dma_memcpy()
1135 first->tx_dma_desc.flags = flags; in at_xdmac_prep_dma_memcpy()
1136 first->xfer_size = len; in at_xdmac_prep_dma_memcpy()
1138 return &first->tx_dma_desc; in at_xdmac_prep_dma_memcpy()
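The loop above bounds every linked-list descriptor by the 24-bit UBLEN field scaled by the data width. A self-contained model of that split; the 0xffffff limit mirrors AT_XDMAC_MBR_UBC_UBLEN_MAX as an assumption.

#include <stddef.h>
#include <stdio.h>

#define UBLEN_MAX 0xffffffu	/* assumed 24-bit microblock length field */

int main(void)
{
	size_t remaining = 0x4000000;	/* 64 MiB copy, hypothetical */
	unsigned int dwidth = 2;	/* 4-byte data width */
	size_t max_xfer = (size_t)UBLEN_MAX << dwidth;
	unsigned int descs = 0;

	/* Same shape as the remaining_size loop: carve off maximal
	 * microblocks, then a remainder descriptor. */
	while (remaining) {
		size_t xfer = remaining > max_xfer ? max_xfer : remaining;

		remaining -= xfer;
		descs++;
	}
	printf("%u descriptor(s)\n", descs);	/* prints 2 */
	return 0;
}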
1152 * WARNING: The channel configuration is set here since there is no in at_xdmac_memset_create_desc()
1160 * match the one of another channel. If not, it could lead to spurious in at_xdmac_memset_create_desc()
1181 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_memset_create_desc()
1183 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_memset_create_desc()
1193 desc->lld.mbr_da = dst_addr; in at_xdmac_memset_create_desc()
1194 desc->lld.mbr_ds = value; in at_xdmac_memset_create_desc()
1195 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3 in at_xdmac_memset_create_desc()
1199 desc->lld.mbr_cfg = chan_cc; in at_xdmac_memset_create_desc()
1203 __func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc, in at_xdmac_memset_create_desc()
1204 desc->lld.mbr_cfg); in at_xdmac_memset_create_desc()
1223 list_add_tail(&desc->desc_node, &desc->descs_list); in at_xdmac_prep_dma_memset()
1225 desc->tx_dma_desc.cookie = -EBUSY; in at_xdmac_prep_dma_memset()
1226 desc->tx_dma_desc.flags = flags; in at_xdmac_prep_dma_memset()
1227 desc->xfer_size = len; in at_xdmac_prep_dma_memset()
1229 return &desc->tx_dma_desc; in at_xdmac_prep_dma_memset()
1260 list_splice_tail_init(&first->descs_list, in at_xdmac_prep_dma_memset_sg()
1261 &atchan->free_descs_list); in at_xdmac_prep_dma_memset_sg()
1269 stride = sg_dma_address(sg) - in at_xdmac_prep_dma_memset_sg()
1281 * +-------+ +-------+ +-------+ in at_xdmac_prep_dma_memset_sg()
1282 * | N-2 | | N-1 | | N | in at_xdmac_prep_dma_memset_sg()
1283 * +-------+ +-------+ +-------+ in at_xdmac_prep_dma_memset_sg()
1285 * We need all these three elements (N-2, N-1 and N) in at_xdmac_prep_dma_memset_sg()
1287 * queue N-1 or reuse N-2. in at_xdmac_prep_dma_memset_sg()
1300 * N-2 descriptor in at_xdmac_prep_dma_memset_sg()
1303 ppdesc->lld.mbr_dus = stride; in at_xdmac_prep_dma_memset_sg()
1306 * Put back the N-1 descriptor in the in at_xdmac_prep_dma_memset_sg()
1309 list_add_tail(&pdesc->desc_node, in at_xdmac_prep_dma_memset_sg()
1310 &atchan->free_descs_list); in at_xdmac_prep_dma_memset_sg()
1313 * Make our N-1 descriptor pointer in at_xdmac_prep_dma_memset_sg()
1314 * point to the N-2 since they were in at_xdmac_prep_dma_memset_sg()
1330 * Queue the N-1 descriptor after the in at_xdmac_prep_dma_memset_sg()
1331 * N-2 in at_xdmac_prep_dma_memset_sg()
1336 * Add the N-1 descriptor to the list in at_xdmac_prep_dma_memset_sg()
1340 list_add_tail(&desc->desc_node, in at_xdmac_prep_dma_memset_sg()
1341 &first->descs_list); in at_xdmac_prep_dma_memset_sg()
1355 if ((i == (sg_len - 1)) && in at_xdmac_prep_dma_memset_sg()
1362 * Increment the block count of the N-1 in at_xdmac_prep_dma_memset_sg()
1366 pdesc->lld.mbr_dus = stride; in at_xdmac_prep_dma_memset_sg()
1372 list_add_tail(&desc->desc_node, in at_xdmac_prep_dma_memset_sg()
1373 &atchan->free_descs_list); in at_xdmac_prep_dma_memset_sg()
1387 first->tx_dma_desc.cookie = -EBUSY; in at_xdmac_prep_dma_memset_sg()
1388 first->tx_dma_desc.flags = flags; in at_xdmac_prep_dma_memset_sg()
1389 first->xfer_size = len; in at_xdmac_prep_dma_memset_sg()
1391 return &first->tx_dma_desc; in at_xdmac_prep_dma_memset_sg()
1399 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_tx_status()
1404 u32 cur_nda, check_nda, cur_ubc, mask, value; in at_xdmac_tx_status() local
1416 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_tx_status()
1418 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); in at_xdmac_tx_status()
1424 if (!desc->active_xfer) { in at_xdmac_tx_status()
1425 dma_set_residue(txstate, desc->xfer_size); in at_xdmac_tx_status()
1429 residue = desc->xfer_size; in at_xdmac_tx_status()
1437 * timeout, it requests the residue. If the data are in the DMA FIFO, in at_xdmac_tx_status()
1443 mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC; in at_xdmac_tx_status()
1445 if ((desc->lld.mbr_cfg & mask) == value) { in at_xdmac_tx_status()
1446 at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask); in at_xdmac_tx_status()
1452 * The easiest way to compute the residue should be to pause the DMA in at_xdmac_tx_status()
1456 * - DMA is running, therefore a descriptor change is possible while in at_xdmac_tx_status()
1458 * - When the block transfer is done, the value of the CUBC register in at_xdmac_tx_status()
1463 * INITD -------- ------------ in at_xdmac_tx_status()
1503 if ((desc->lld.mbr_cfg & mask) == value) { in at_xdmac_tx_status()
1504 at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask); in at_xdmac_tx_status()
1514 descs_list = &desc->descs_list; in at_xdmac_tx_status()
1516 dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg); in at_xdmac_tx_status()
1517 residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth; in at_xdmac_tx_status()
1518 if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) { in at_xdmac_tx_status()
1529 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue); in at_xdmac_tx_status()
1532 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_tx_status()
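A self-contained model of the residue walk that closes at_xdmac_tx_status(): subtract every microblock up to the one the hardware reports through CNDA, then add back the CUBC units still pending. A single data width is assumed here; the driver reads it per descriptor from mbr_cfg.

#include <stdint.h>
#include <stdio.h>

struct lld { uint32_t mbr_nda, mbr_ubc; };	/* trimmed-down descriptor */

static size_t residue(const struct lld *d, unsigned int n, size_t xfer_size,
		      uint32_t cur_nda, uint32_t cur_ubc, unsigned int dwidth)
{
	size_t res = xfer_size;
	unsigned int i;

	for (i = 0; i < n; i++) {
		/* UBLEN is the low 24 bits of mbr_ubc, in data units. */
		res -= (size_t)(d[i].mbr_ubc & 0xffffff) << dwidth;
		/* CNDA names the *next* descriptor, so this one is current. */
		if ((d[i].mbr_nda & 0xfffffffc) == cur_nda)
			break;
	}
	return res + ((size_t)cur_ubc << dwidth);
}

int main(void)
{
	/* Two 1024-unit microblocks, 4-byte width; the first is running
	 * with 100 units left, so CNDA points at the second (0x1040). */
	struct lld d[2] = { { 0x1040, 1024 }, { 0x0, 1024 } };

	printf("residue = %zu bytes\n",
	       residue(d, 2, 8192, 0x1040, 100, 2));	/* prints 4496 */
	return 0;
}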
1541 * If the channel is enabled, do nothing; advance_work will be triggered in at_xdmac_advance_work()
1544 if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) { in at_xdmac_advance_work()
1545 desc = list_first_entry(&atchan->xfers_list, in at_xdmac_advance_work()
1548 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); in at_xdmac_advance_work()
1549 if (!desc->active_xfer) in at_xdmac_advance_work()
1559 spin_lock_irq(&atchan->lock); in at_xdmac_handle_cyclic()
1560 if (list_empty(&atchan->xfers_list)) { in at_xdmac_handle_cyclic()
1561 spin_unlock_irq(&atchan->lock); in at_xdmac_handle_cyclic()
1564 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, in at_xdmac_handle_cyclic()
1566 spin_unlock_irq(&atchan->lock); in at_xdmac_handle_cyclic()
1567 txd = &desc->tx_dma_desc; in at_xdmac_handle_cyclic()
1568 if (txd->flags & DMA_PREP_INTERRUPT) in at_xdmac_handle_cyclic()
1574 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_handle_error()
1583 if (atchan->irq_status & AT_XDMAC_CIS_RBEIS) in at_xdmac_handle_error()
1584 dev_err(chan2dev(&atchan->chan), "read bus error!!!"); in at_xdmac_handle_error()
1585 if (atchan->irq_status & AT_XDMAC_CIS_WBEIS) in at_xdmac_handle_error()
1586 dev_err(chan2dev(&atchan->chan), "write bus error!!!"); in at_xdmac_handle_error()
1587 if (atchan->irq_status & AT_XDMAC_CIS_ROIS) in at_xdmac_handle_error()
1588 dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); in at_xdmac_handle_error()
1590 spin_lock_irq(&atchan->lock); in at_xdmac_handle_error()
1592 /* Channel must be disabled first as it's not done automatically */ in at_xdmac_handle_error()
1593 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); in at_xdmac_handle_error()
1594 while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask) in at_xdmac_handle_error()
1597 bad_desc = list_first_entry(&atchan->xfers_list, in at_xdmac_handle_error()
1601 spin_unlock_irq(&atchan->lock); in at_xdmac_handle_error()
1604 dev_dbg(chan2dev(&atchan->chan), in at_xdmac_handle_error()
1606 __func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da, in at_xdmac_handle_error()
1607 bad_desc->lld.mbr_ubc); in at_xdmac_handle_error()
1618 dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n", in at_xdmac_tasklet()
1619 __func__, atchan->irq_status); in at_xdmac_tasklet()
1627 } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS) in at_xdmac_tasklet()
1628 || (atchan->irq_status & error_mask)) { in at_xdmac_tasklet()
1631 if (atchan->irq_status & error_mask) in at_xdmac_tasklet()
1634 spin_lock_irq(&atchan->lock); in at_xdmac_tasklet()
1635 desc = list_first_entry(&atchan->xfers_list, in at_xdmac_tasklet()
1638 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); in at_xdmac_tasklet()
1639 if (!desc->active_xfer) { in at_xdmac_tasklet()
1640 dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting"); in at_xdmac_tasklet()
1641 spin_unlock_irq(&atchan->lock); in at_xdmac_tasklet()
1645 txd = &desc->tx_dma_desc; in at_xdmac_tasklet()
1648 list_del(&desc->xfer_node); in at_xdmac_tasklet()
1649 spin_unlock_irq(&atchan->lock); in at_xdmac_tasklet()
1651 if (txd->flags & DMA_PREP_INTERRUPT) in at_xdmac_tasklet()
1656 spin_lock_irq(&atchan->lock); in at_xdmac_tasklet()
1658 list_splice_tail_init(&desc->descs_list, in at_xdmac_tasklet()
1659 &atchan->free_descs_list); in at_xdmac_tasklet()
1661 spin_unlock_irq(&atchan->lock); in at_xdmac_tasklet()
1678 dev_vdbg(atxdmac->dma.dev, in at_xdmac_interrupt()
1685 /* We have to find which channel has generated the interrupt. */ in at_xdmac_interrupt()
1686 for (i = 0; i < atxdmac->dma.chancnt; i++) { in at_xdmac_interrupt()
1690 atchan = &atxdmac->chan[i]; in at_xdmac_interrupt()
1693 atchan->irq_status = chan_status & chan_imr; in at_xdmac_interrupt()
1694 dev_vdbg(atxdmac->dma.dev, in at_xdmac_interrupt()
1697 dev_vdbg(chan2dev(&atchan->chan), in at_xdmac_interrupt()
1707 if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) in at_xdmac_interrupt()
1708 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); in at_xdmac_interrupt()
1710 tasklet_schedule(&atchan->tasklet); in at_xdmac_interrupt()
1724 dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__); in at_xdmac_issue_pending()
1726 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_issue_pending()
1728 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_issue_pending()
1742 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_device_config()
1744 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_device_config()
1752 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_device_pause()
1757 if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status)) in at_xdmac_device_pause()
1760 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_device_pause()
1761 at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask); in at_xdmac_device_pause()
1765 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_device_pause()
1773 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_device_resume()
1778 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_device_resume()
1780 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_device_resume()
1784 at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask); in at_xdmac_device_resume()
1785 clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); in at_xdmac_device_resume()
1786 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_device_resume()
1795 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); in at_xdmac_device_terminate_all()
1800 spin_lock_irqsave(&atchan->lock, flags); in at_xdmac_device_terminate_all()
1801 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); in at_xdmac_device_terminate_all()
1802 while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask) in at_xdmac_device_terminate_all()
1806 list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) { in at_xdmac_device_terminate_all()
1807 list_del(&desc->xfer_node); in at_xdmac_device_terminate_all()
1808 list_splice_tail_init(&desc->descs_list, in at_xdmac_device_terminate_all()
1809 &atchan->free_descs_list); in at_xdmac_device_terminate_all()
1812 clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); in at_xdmac_device_terminate_all()
1813 clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); in at_xdmac_device_terminate_all()
1814 spin_unlock_irqrestore(&atchan->lock, flags); in at_xdmac_device_terminate_all()
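A hedged client-side sketch of reaching this path: the generic wrapper below funnels into at_xdmac_device_terminate_all(), which disables the channel through GD and recycles its descriptors; dmaengine_terminate_sync() additionally waits for any running completion callbacks.

#include <linux/dmaengine.h>

static int client_stop(struct dma_chan *chan)
{
	return dmaengine_terminate_sync(chan);
}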
1827 "can't allocate channel resources (channel enabled)\n"); in at_xdmac_alloc_chan_resources()
1828 return -EIO; in at_xdmac_alloc_chan_resources()
1831 if (!list_empty(&atchan->free_descs_list)) { in at_xdmac_alloc_chan_resources()
1833 "can't allocate channel resources (channel not free from a previous use)\n"); in at_xdmac_alloc_chan_resources()
1834 return -EIO; in at_xdmac_alloc_chan_resources()
1843 return -EIO; in at_xdmac_alloc_chan_resources()
1849 list_add_tail(&desc->desc_node, &atchan->free_descs_list); in at_xdmac_alloc_chan_resources()
1862 struct at_xdmac *atxdmac = to_at_xdmac(chan->device); in at_xdmac_free_chan_resources()
1865 list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) { in at_xdmac_free_chan_resources()
1867 list_del(&desc->desc_node); in at_xdmac_free_chan_resources()
1868 dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys); in at_xdmac_free_chan_resources()
1880 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { in atmel_xdmac_prepare()
1885 return -EAGAIN; in atmel_xdmac_prepare()
1899 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { in atmel_xdmac_suspend()
1902 atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC); in atmel_xdmac_suspend()
1906 atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); in atmel_xdmac_suspend()
1907 atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA); in atmel_xdmac_suspend()
1908 atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC); in atmel_xdmac_suspend()
1911 atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM); in atmel_xdmac_suspend()
1912 atxdmac->save_gs = at_xdmac_read(atxdmac, AT_XDMAC_GS); in atmel_xdmac_suspend()
1915 clk_disable_unprepare(atxdmac->clk); in atmel_xdmac_suspend()
1927 ret = clk_prepare_enable(atxdmac->clk); in atmel_xdmac_resume()
1932 for (i = 0; i < atxdmac->dma.chancnt; i++) { in atmel_xdmac_resume()
1933 atchan = &atxdmac->chan[i]; in atmel_xdmac_resume()
1938 at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim); in atmel_xdmac_resume()
1939 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { in atmel_xdmac_resume()
1941 at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc); in atmel_xdmac_resume()
1945 at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda); in atmel_xdmac_resume()
1946 at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc); in atmel_xdmac_resume()
1947 at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim); in atmel_xdmac_resume()
1949 if (atxdmac->save_gs & atchan->mask) in atmel_xdmac_resume()
1950 at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask); in atmel_xdmac_resume()
1980 dev_err(&pdev->dev, "invalid number of channels (%u)\n", in at_xdmac_probe()
1982 return -EINVAL; in at_xdmac_probe()
1987 atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); in at_xdmac_probe()
1989 dev_err(&pdev->dev, "can't allocate at_xdmac structure\n"); in at_xdmac_probe()
1990 return -ENOMEM; in at_xdmac_probe()
1993 atxdmac->regs = base; in at_xdmac_probe()
1994 atxdmac->irq = irq; in at_xdmac_probe()
1996 atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk"); in at_xdmac_probe()
1997 if (IS_ERR(atxdmac->clk)) { in at_xdmac_probe()
1998 dev_err(&pdev->dev, "can't get dma_clk\n"); in at_xdmac_probe()
1999 return PTR_ERR(atxdmac->clk); in at_xdmac_probe()
2003 ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac); in at_xdmac_probe()
2005 dev_err(&pdev->dev, "can't request irq\n"); in at_xdmac_probe()
2009 ret = clk_prepare_enable(atxdmac->clk); in at_xdmac_probe()
2011 dev_err(&pdev->dev, "can't prepare or enable clock\n"); in at_xdmac_probe()
2015 atxdmac->at_xdmac_desc_pool = in at_xdmac_probe()
2016 dmam_pool_create(dev_name(&pdev->dev), &pdev->dev, in at_xdmac_probe()
2018 if (!atxdmac->at_xdmac_desc_pool) { in at_xdmac_probe()
2019 dev_err(&pdev->dev, "no memory for descriptors dma pool\n"); in at_xdmac_probe()
2020 ret = -ENOMEM; in at_xdmac_probe()
2024 dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask); in at_xdmac_probe()
2025 dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask); in at_xdmac_probe()
2026 dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask); in at_xdmac_probe()
2027 dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask); in at_xdmac_probe()
2028 dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask); in at_xdmac_probe()
2029 dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask); in at_xdmac_probe()
2032 * one channel, second allocation fails in private_candidate. in at_xdmac_probe()
2034 dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask); in at_xdmac_probe()
2035 atxdmac->dma.dev = &pdev->dev; in at_xdmac_probe()
2036 atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources; in at_xdmac_probe()
2037 atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources; in at_xdmac_probe()
2038 atxdmac->dma.device_tx_status = at_xdmac_tx_status; in at_xdmac_probe()
2039 atxdmac->dma.device_issue_pending = at_xdmac_issue_pending; in at_xdmac_probe()
2040 atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic; in at_xdmac_probe()
2041 atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved; in at_xdmac_probe()
2042 atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy; in at_xdmac_probe()
2043 atxdmac->dma.device_prep_dma_memset = at_xdmac_prep_dma_memset; in at_xdmac_probe()
2044 atxdmac->dma.device_prep_dma_memset_sg = at_xdmac_prep_dma_memset_sg; in at_xdmac_probe()
2045 atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg; in at_xdmac_probe()
2046 atxdmac->dma.device_config = at_xdmac_device_config; in at_xdmac_probe()
2047 atxdmac->dma.device_pause = at_xdmac_device_pause; in at_xdmac_probe()
2048 atxdmac->dma.device_resume = at_xdmac_device_resume; in at_xdmac_probe()
2049 atxdmac->dma.device_terminate_all = at_xdmac_device_terminate_all; in at_xdmac_probe()
2050 atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS; in at_xdmac_probe()
2051 atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS; in at_xdmac_probe()
2052 atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); in at_xdmac_probe()
2053 atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; in at_xdmac_probe()
2059 INIT_LIST_HEAD(&atxdmac->dma.channels); in at_xdmac_probe()
2061 struct at_xdmac_chan *atchan = &atxdmac->chan[i]; in at_xdmac_probe()
2063 atchan->chan.device = &atxdmac->dma; in at_xdmac_probe()
2064 list_add_tail(&atchan->chan.device_node, in at_xdmac_probe()
2065 &atxdmac->dma.channels); in at_xdmac_probe()
2067 atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i); in at_xdmac_probe()
2068 atchan->mask = 1 << i; in at_xdmac_probe()
2070 spin_lock_init(&atchan->lock); in at_xdmac_probe()
2071 INIT_LIST_HEAD(&atchan->xfers_list); in at_xdmac_probe()
2072 INIT_LIST_HEAD(&atchan->free_descs_list); in at_xdmac_probe()
2073 tasklet_setup(&atchan->tasklet, at_xdmac_tasklet); in at_xdmac_probe()
2081 ret = dma_async_device_register(&atxdmac->dma); in at_xdmac_probe()
2083 dev_err(&pdev->dev, "fail to register DMA engine device\n"); in at_xdmac_probe()
2087 ret = of_dma_controller_register(pdev->dev.of_node, in at_xdmac_probe()
2090 dev_err(&pdev->dev, "could not register of dma controller\n"); in at_xdmac_probe()
2094 dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n", in at_xdmac_probe()
2095 nr_channels, atxdmac->regs); in at_xdmac_probe()
2100 dma_async_device_unregister(&atxdmac->dma); in at_xdmac_probe()
2102 clk_disable_unprepare(atxdmac->clk); in at_xdmac_probe()
2104 free_irq(atxdmac->irq, atxdmac); in at_xdmac_probe()
2114 of_dma_controller_free(pdev->dev.of_node); in at_xdmac_remove()
2115 dma_async_device_unregister(&atxdmac->dma); in at_xdmac_remove()
2116 clk_disable_unprepare(atxdmac->clk); in at_xdmac_remove()
2118 free_irq(atxdmac->irq, atxdmac); in at_xdmac_remove()
2120 for (i = 0; i < atxdmac->dma.chancnt; i++) { in at_xdmac_remove()
2121 struct at_xdmac_chan *atchan = &atxdmac->chan[i]; in at_xdmac_remove()
2123 tasklet_kill(&atchan->tasklet); in at_xdmac_remove()
2124 at_xdmac_free_chan_resources(&atchan->chan); in at_xdmac_remove()
2137 .compatible = "atmel,sama5d4-dma",
2160 MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");