Lines matching "max-burst-len"
17 #include <linux/dma-mapping.h>
37 #include "../virt-dma.h"
79 #define EDMA_DMAQNUM 0x0240 /* 8 registers (4 on OMAP-L1xx) */
109 #define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */
110 #define GET_NUM_QDMACH(x) ((x & 0x70) >> 4) /* bits 4-6 */
111 #define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */
112 #define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */
113 #define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */
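These CCCFG field extractors can be exercised in isolation. A minimal sketch, assuming a hypothetical register value; the derived counts mirror the edma_setup_from_hw() lines later in this listing (num_channels = BIT(value + 1), num_slots = BIT(value + 4)):

    #include <stdio.h>

    #define GET_NUM_DMACH(x)   ((x) & 0x7)              /* bits 0-2 */
    #define GET_NUM_QDMACH(x)  (((x) & 0x70) >> 4)      /* bits 4-6 */
    #define GET_NUM_PAENTRY(x) (((x) & 0x7000) >> 12)   /* bits 12-14 */
    #define GET_NUM_EVQUE(x)   (((x) & 0x70000) >> 16)  /* bits 16-18 */
    #define GET_NUM_REGN(x)    (((x) & 0x300000) >> 20) /* bits 20-21 */

    int main(void)
    {
        unsigned int cccfg = 0x00134025; /* hypothetical CCCFG readout */

        /* The driver turns these encoded fields into actual counts,
         * e.g. num_channels = BIT(GET_NUM_DMACH(cccfg) + 1) and
         * num_slots = BIT(GET_NUM_PAENTRY(cccfg) + 4). */
        printf("dmach=%u qdmach=%u paentry=%u evque=%u regn=%u\n",
               GET_NUM_DMACH(cccfg), GET_NUM_QDMACH(cccfg),
               GET_NUM_PAENTRY(cccfg), GET_NUM_EVQUE(cccfg),
               GET_NUM_REGN(cccfg));
        return 0;
    }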
120 * Max of 20 segments per channel to conserve PaRAM slots
123 * fail. Today davinci-pcm is the only user of this driver and
130 #define EDMA_CHANNEL_ANY -1 /* for edma_alloc_channel() */
131 #define EDMA_SLOT_ANY -1 /* for edma_alloc_slot() */
138 * reg0: channel/event 0-31
139 * reg1: channel/event 32-63
142 * bit 0-4 (0x1f) is the bit offset within the register
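The register/bit split described above (reg0 for events 0-31, reg1 for 32-63, bits 0-4 as the offset within a register) reduces to one shift and one mask. A sketch with hypothetical helper names ch_reg_index() and ch_bit_mask():

    #include <stdio.h>

    /* Hypothetical helpers mirroring the scheme described above:
     * register index is ch_num / 32, bit offset is ch_num % 32. */
    static inline unsigned int ch_reg_index(unsigned int ch)
    {
        return ch >> 5;           /* reg0 for 0-31, reg1 for 32-63 */
    }

    static inline unsigned int ch_bit_mask(unsigned int ch)
    {
        return 1u << (ch & 0x1f); /* bits 0-4 select the bit offset */
    }

    int main(void)
    {
        printf("ch 37 -> reg%u, bit mask 0x%08x\n",
               ch_reg_index(37), ch_bit_mask(37));
        return 0;
    }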
173 u32 len; member
192 * - processed_stat: the number of SG elements we have traversed
199 * - residue: The amount of bytes we have left to transfer for this desc
201 * - residue_stat: The residue in bytes of data we have covered
205 * - sg_len: Tracks the length of the current intermediate transfer,
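How these bookkeeping fields move together is easiest to see in isolation. Below is a simplified stand-in for the descriptor (not the driver's real edma_desc layout), updated the way the completion-handler lines later in this listing update it (residue -= sg_len, residue_stat = residue):

    #include <stdio.h>
    #include <stddef.h>

    /* Simplified stand-in for the bookkeeping fields documented above. */
    struct desc_stats {
        size_t processed_stat; /* SG elements fully traversed */
        size_t residue;        /* bytes left for the whole descriptor */
        size_t residue_stat;   /* bytes left as of the last checkpoint */
        size_t sg_len;         /* length of the current intermediate set */
    };

    static void intermediate_done(struct desc_stats *d, size_t elems)
    {
        /* Residue shrinks by the intermediate set just finished. */
        d->residue -= d->sg_len;
        d->residue_stat = d->residue;
        d->processed_stat += elems;
    }

    int main(void)
    {
        struct desc_stats d = { 0, 4096, 4096, 1024 };

        intermediate_done(&d, 4);
        printf("residue=%zu processed_stat=%zu\n", d.residue,
               d.processed_stat);
        return 0;
    }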
296 .compatible = "ti,edma3-tpcc",
304 { .compatible = "ti,edma3-tptc", },
311 return (unsigned int)__raw_readl(ecc->base + offset); in edma_read()
316 __raw_writel(val, ecc->base + offset); in edma_write()
444 struct edma_cc *ecc = echan->ecc; in edma_set_chmap()
445 int channel = EDMA_CHAN_SLOT(echan->ch_num); in edma_set_chmap()
447 if (ecc->chmap_exist) { in edma_set_chmap()
455 struct edma_cc *ecc = echan->ecc; in edma_setup_interrupt()
456 int channel = EDMA_CHAN_SLOT(echan->ch_num); in edma_setup_interrupt()
475 if (slot >= ecc->num_slots) in edma_write_slot()
477 memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE); in edma_write_slot()
484 if (slot >= ecc->num_slots) in edma_read_slot()
485 return -EINVAL; in edma_read_slot()
486 memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE); in edma_read_slot()
492 * edma_alloc_slot - allocate DMA parameter RAM
511 if (ecc->chmap_exist && slot < ecc->num_channels) in edma_alloc_slot()
516 if (ecc->chmap_exist) in edma_alloc_slot()
519 slot = ecc->num_channels; in edma_alloc_slot()
521 slot = find_next_zero_bit(ecc->slot_inuse, in edma_alloc_slot()
522 ecc->num_slots, in edma_alloc_slot()
524 if (slot == ecc->num_slots) in edma_alloc_slot()
525 return -ENOMEM; in edma_alloc_slot()
526 if (!test_and_set_bit(slot, ecc->slot_inuse)) in edma_alloc_slot()
529 } else if (slot >= ecc->num_slots) { in edma_alloc_slot()
530 return -EINVAL; in edma_alloc_slot()
531 } else if (test_and_set_bit(slot, ecc->slot_inuse)) { in edma_alloc_slot()
532 return -EBUSY; in edma_alloc_slot()
537 return EDMA_CTLR_CHAN(ecc->id, slot); in edma_alloc_slot()
543 if (slot >= ecc->num_slots) in edma_free_slot()
547 clear_bit(slot, ecc->slot_inuse); in edma_free_slot()
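The allocator above scans a slot-in-use bitmap, skipping the channel-mapped region when a channel map exists, and fails with -ENOMEM when no slot is free. A toy model using a plain array in place of the kernel bitmap API; alloc_any_slot() is a hypothetical name:

    #include <stdio.h>

    #define NUM_SLOTS 128

    /* Toy model of edma_alloc_slot()'s EDMA_SLOT_ANY scan: start past
     * the channel-mapped region and claim the first free slot.
     * slot_inuse[] stands in for the kernel bitmap. */
    static int alloc_any_slot(unsigned char *slot_inuse, int num_slots,
                              int first_free)
    {
        int slot;

        for (slot = first_free; slot < num_slots; slot++) {
            if (!slot_inuse[slot]) {
                slot_inuse[slot] = 1;
                return slot;
            }
        }
        return -1; /* -ENOMEM in the driver */
    }

    int main(void)
    {
        unsigned char inuse[NUM_SLOTS] = { 0 };
        int num_channels = 32; /* hypothetical: chmap exists */

        /* Slots below num_channels are reserved for channel mapping. */
        int s = alloc_any_slot(inuse, NUM_SLOTS, num_channels);

        printf("allocated slot %d\n", s); /* 32 */
        return 0;
    }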
551 * edma_link - link one parameter RAM slot to another
561 dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n"); in edma_link()
565 if (from >= ecc->num_slots || to >= ecc->num_slots) in edma_link()
573 * edma_get_position - returns the current transfer point
600 struct edma_cc *ecc = echan->ecc; in edma_start()
601 int channel = EDMA_CHAN_SLOT(echan->ch_num); in edma_start()
605 if (!echan->hw_triggered) { in edma_start()
607 dev_dbg(ecc->dev, "ESR%d %08x\n", idx, in edma_start()
612 dev_dbg(ecc->dev, "ER%d %08x\n", idx, in edma_start()
620 dev_dbg(ecc->dev, "EER%d %08x\n", idx, in edma_start()
627 struct edma_cc *ecc = echan->ecc; in edma_stop()
628 int channel = EDMA_CHAN_SLOT(echan->ch_num); in edma_stop()
640 dev_dbg(ecc->dev, "EER%d %08x\n", idx, in edma_stop()
654 int channel = EDMA_CHAN_SLOT(echan->ch_num); in edma_pause()
656 edma_shadow0_write_array(echan->ecc, SH_EECR, in edma_pause()
661 /* Re-enable EDMA hardware events on the specified channel. */
664 int channel = EDMA_CHAN_SLOT(echan->ch_num); in edma_resume()
666 edma_shadow0_write_array(echan->ecc, SH_EESR, in edma_resume()
673 struct edma_cc *ecc = echan->ecc; in edma_trigger_channel()
674 int channel = EDMA_CHAN_SLOT(echan->ch_num); in edma_trigger_channel()
680 dev_dbg(ecc->dev, "ESR%d %08x\n", idx, in edma_trigger_channel()
686 struct edma_cc *ecc = echan->ecc; in edma_clean_channel()
687 int channel = EDMA_CHAN_SLOT(echan->ch_num); in edma_clean_channel()
691 dev_dbg(ecc->dev, "EMR%d %08x\n", idx, in edma_clean_channel()
705 struct edma_cc *ecc = echan->ecc; in edma_assign_channel_eventq()
706 int channel = EDMA_CHAN_SLOT(echan->ch_num); in edma_assign_channel_eventq()
711 eventq_no = ecc->default_queue; in edma_assign_channel_eventq()
712 if (eventq_no >= ecc->num_tc) in edma_assign_channel_eventq()
723 struct edma_cc *ecc = echan->ecc; in edma_alloc_channel()
724 int channel = EDMA_CHAN_SLOT(echan->ch_num); in edma_alloc_channel()
726 if (!test_bit(echan->ch_num, ecc->channels_mask)) { in edma_alloc_channel()
727 dev_err(ecc->dev, "Channel%d is reserved, can not be used!\n", in edma_alloc_channel()
728 echan->ch_num); in edma_alloc_channel()
729 return -EINVAL; in edma_alloc_channel()
777 struct edma_cc *ecc = echan->ecc; in edma_execute()
780 struct device *dev = echan->vchan.chan.device->dev; in edma_execute()
783 if (!echan->edesc) { in edma_execute()
785 vdesc = vchan_next_desc(&echan->vchan); in edma_execute()
788 list_del(&vdesc->node); in edma_execute()
789 echan->edesc = to_edma_desc(&vdesc->tx); in edma_execute()
792 edesc = echan->edesc; in edma_execute()
795 left = edesc->pset_nr - edesc->processed; in edma_execute()
797 edesc->sg_len = 0; in edma_execute()
801 j = i + edesc->processed; in edma_execute()
802 edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param); in edma_execute()
803 edesc->sg_len += edesc->pset[j].len; in edma_execute()
816 j, echan->ch_num, echan->slot[i], in edma_execute()
817 edesc->pset[j].param.opt, in edma_execute()
818 edesc->pset[j].param.src, in edma_execute()
819 edesc->pset[j].param.dst, in edma_execute()
820 edesc->pset[j].param.a_b_cnt, in edma_execute()
821 edesc->pset[j].param.ccnt, in edma_execute()
822 edesc->pset[j].param.src_dst_bidx, in edma_execute()
823 edesc->pset[j].param.src_dst_cidx, in edma_execute()
824 edesc->pset[j].param.link_bcntrld); in edma_execute()
826 if (i != (nslots - 1)) in edma_execute()
827 edma_link(ecc, echan->slot[i], echan->slot[i + 1]); in edma_execute()
830 edesc->processed += nslots; in edma_execute()
833 * If this is either the last set in a set of SG-list transactions in edma_execute()
837 if (edesc->processed == edesc->pset_nr) { in edma_execute()
838 if (edesc->cyclic) in edma_execute()
839 edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]); in edma_execute()
841 edma_link(ecc, echan->slot[nslots - 1], in edma_execute()
842 echan->ecc->dummy_slot); in edma_execute()
845 if (echan->missed) { in edma_execute()
851 dev_dbg(dev, "missed event on channel %d\n", echan->ch_num); in edma_execute()
856 echan->missed = 0; in edma_execute()
857 } else if (edesc->processed <= MAX_NR_SG) { in edma_execute()
859 echan->ch_num); in edma_execute()
863 echan->ch_num, edesc->processed); in edma_execute()
874 spin_lock_irqsave(&echan->vchan.lock, flags); in edma_terminate_all()
879 * echan->edesc is NULL and exit.) in edma_terminate_all()
881 if (echan->edesc) { in edma_terminate_all()
884 if (!echan->tc && echan->edesc->cyclic) in edma_terminate_all()
887 vchan_terminate_vdesc(&echan->edesc->vdesc); in edma_terminate_all()
888 echan->edesc = NULL; in edma_terminate_all()
891 vchan_get_all_descriptors(&echan->vchan, &head); in edma_terminate_all()
892 spin_unlock_irqrestore(&echan->vchan.lock, flags); in edma_terminate_all()
893 vchan_dma_desc_free_list(&echan->vchan, &head); in edma_terminate_all()
902 vchan_synchronize(&echan->vchan); in edma_synchronize()
910 if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || in edma_slave_config()
911 cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) in edma_slave_config()
912 return -EINVAL; in edma_slave_config()
914 if (cfg->src_maxburst > chan->device->max_burst || in edma_slave_config()
915 cfg->dst_maxburst > chan->device->max_burst) in edma_slave_config()
916 return -EINVAL; in edma_slave_config()
918 memcpy(&echan->cfg, cfg, sizeof(echan->cfg)); in edma_slave_config()
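The validation above rejects 8-byte bus widths and bursts that exceed the device max_burst, which the init code later caps at SZ_32K - 1 because CIDX is a 16-bit signed field. A standalone sketch of the same checks, with widths expressed in bytes and check_config() as a hypothetical helper:

    #include <stdio.h>

    #define SZ_32K 0x8000

    struct cfg {
        unsigned int src_width, dst_width;     /* bytes */
        unsigned int src_maxburst, dst_maxburst;
    };

    /* 8-byte bus width is unsupported; bursts must fit the 16-bit
     * signed CIDX field, hence the SZ_32K - 1 limit. */
    static int check_config(const struct cfg *c)
    {
        unsigned int max_burst = SZ_32K - 1;

        if (c->src_width == 8 || c->dst_width == 8)
            return -1;
        if (c->src_maxburst > max_burst || c->dst_maxburst > max_burst)
            return -1;
        return 0;
    }

    int main(void)
    {
        struct cfg ok = { 4, 4, 16, 16 }, bad = { 8, 4, 16, 16 };

        printf("%d %d\n", check_config(&ok), check_config(&bad));
        return 0;
    }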
927 if (!echan->edesc) in edma_dma_pause()
928 return -EINVAL; in edma_dma_pause()
948 * @burst: In units of dev_width, how much to send
954 dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst, in edma_config_pset() argument
959 struct device *dev = chan->device->dev; in edma_config_pset()
960 struct edmacc_param *param = &epset->param; in edma_config_pset()
966 if (!burst) in edma_config_pset()
967 burst = 1; in edma_config_pset()
970 * A-synced transfers. This allows for large contiguous in edma_config_pset()
973 if (burst == 1) { in edma_config_pset()
975 * For the A-sync case, bcnt and ccnt are the remainder in edma_config_pset()
977 * (dma_length / acnt) by (SZ_64K - 1). This is so in edma_config_pset()
979 * Note: In A-sync transfer only, bcntrld is used, but it in edma_config_pset()
981 * In this case, the best way adopted is: bcnt for the in edma_config_pset()
983 * every successive frame, bcnt will be SZ_64K - 1. This in edma_config_pset()
987 ccnt = dma_length / acnt / (SZ_64K - 1); in edma_config_pset()
988 bcnt = dma_length / acnt - ccnt * (SZ_64K - 1); in edma_config_pset()
990 * If bcnt is non-zero, we have a remainder and hence an in edma_config_pset()
996 bcnt = SZ_64K - 1; in edma_config_pset()
1001 * use AB-synced transfers where A count is the fifo in edma_config_pset()
1005 * to SZ_64K-1. This places an upper bound on the length in edma_config_pset()
1009 bcnt = burst; in edma_config_pset()
1011 if (ccnt > (SZ_64K - 1)) { in edma_config_pset()
1012 dev_err(dev, "Exceeded max SG segment size\n"); in edma_config_pset()
1013 return -EINVAL; in edma_config_pset()
1018 epset->len = dma_length; in edma_config_pset()
1025 epset->addr = src_addr; in edma_config_pset()
1031 epset->addr = dst_addr; in edma_config_pset()
1037 epset->addr = src_addr; in edma_config_pset()
1040 return -EINVAL; in edma_config_pset()
1043 param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); in edma_config_pset()
1046 param->opt |= SYNCDIM; in edma_config_pset()
1048 param->src = src_addr; in edma_config_pset()
1049 param->dst = dst_addr; in edma_config_pset()
1051 param->src_dst_bidx = (dst_bidx << 16) | src_bidx; in edma_config_pset()
1052 param->src_dst_cidx = (dst_cidx << 16) | src_cidx; in edma_config_pset()
1054 param->a_b_cnt = bcnt << 16 | acnt; in edma_config_pset()
1055 param->ccnt = ccnt; in edma_config_pset()
1058 * A-sync case, and in this case only a reload value in edma_config_pset()
1059 * of SZ_64K - 1 is assured. 'link' is initially set to NULL in edma_config_pset()
1062 param->link_bcntrld = 0xffffffff; in edma_config_pset()
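The counter math above can be reproduced with plain arithmetic. A sketch of both cases, assuming SZ_64K = 0x10000 and hypothetical inputs: for burst == 1, ccnt and bcnt are the quotient and remainder of (dma_length / acnt) divided by (SZ_64K - 1); otherwise the transfer is AB-synced with bcnt = burst:

    #include <stdio.h>

    #define SZ_64K 0x10000

    int main(void)
    {
        unsigned int acnt = 4, burst = 1;          /* hypothetical */
        unsigned int dma_length = 1000000;         /* segment bytes */
        unsigned int bcnt, ccnt, absync;

        if (burst == 1) {
            /* A-sync: quotient/remainder of (len/acnt) by SZ_64K-1 */
            ccnt = dma_length / acnt / (SZ_64K - 1);
            bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
            if (bcnt)
                ccnt++;            /* remainder becomes an extra frame */
            else
                bcnt = SZ_64K - 1; /* exact multiple: full frames only */
            absync = 0;
        } else {
            /* AB-sync: acnt * bcnt bytes per event, ccnt events */
            bcnt = burst;
            ccnt = dma_length / (acnt * bcnt);
            absync = 1;
        }
        printf("absync=%u acnt=%u bcnt=%u ccnt=%u\n",
               absync, acnt, bcnt, ccnt);
        return 0;
    }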
1072 struct device *dev = chan->device->dev; in edma_prep_slave_sg()
1076 u32 burst; in edma_prep_slave_sg() local
1084 src_addr = echan->cfg.src_addr; in edma_prep_slave_sg()
1085 dev_width = echan->cfg.src_addr_width; in edma_prep_slave_sg()
1086 burst = echan->cfg.src_maxburst; in edma_prep_slave_sg()
1088 dst_addr = echan->cfg.dst_addr; in edma_prep_slave_sg()
1089 dev_width = echan->cfg.dst_addr_width; in edma_prep_slave_sg()
1090 burst = echan->cfg.dst_maxburst; in edma_prep_slave_sg()
1105 edesc->pset_nr = sg_len; in edma_prep_slave_sg()
1106 edesc->residue = 0; in edma_prep_slave_sg()
1107 edesc->direction = direction; in edma_prep_slave_sg()
1108 edesc->echan = echan; in edma_prep_slave_sg()
1114 if (echan->slot[i] < 0) { in edma_prep_slave_sg()
1115 echan->slot[i] = in edma_prep_slave_sg()
1116 edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY); in edma_prep_slave_sg()
1117 if (echan->slot[i] < 0) { in edma_prep_slave_sg()
1134 ret = edma_config_pset(chan, &edesc->pset[i], src_addr, in edma_prep_slave_sg()
1135 dst_addr, burst, dev_width, in edma_prep_slave_sg()
1142 edesc->absync = ret; in edma_prep_slave_sg()
1143 edesc->residue += sg_dma_len(sg); in edma_prep_slave_sg()
1145 if (i == sg_len - 1) in edma_prep_slave_sg()
1147 edesc->pset[i].param.opt |= TCINTEN; in edma_prep_slave_sg()
1155 edesc->pset[i].param.opt |= (TCINTEN | TCCMODE); in edma_prep_slave_sg()
1157 edesc->residue_stat = edesc->residue; in edma_prep_slave_sg()
1159 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); in edma_prep_slave_sg()
1164 size_t len, unsigned long tx_flags) in edma_prep_dma_memcpy() argument
1168 struct device *dev = chan->device->dev; in edma_prep_dma_memcpy()
1172 if (unlikely(!echan || !len)) in edma_prep_dma_memcpy()
1176 switch (__ffs((src | dest | len))) { in edma_prep_dma_memcpy()
1178 array_size = SZ_32K - 1; in edma_prep_dma_memcpy()
1181 array_size = SZ_32K - 2; in edma_prep_dma_memcpy()
1184 array_size = SZ_32K - 4; in edma_prep_dma_memcpy()
1188 if (len < SZ_64K) { in edma_prep_dma_memcpy()
1191 * slot and with one burst. in edma_prep_dma_memcpy()
1194 width = len; in edma_prep_dma_memcpy()
1195 pset_len = len; in edma_prep_dma_memcpy()
1204 * ACNT = full_length - length1, length2 = ACNT in edma_prep_dma_memcpy()
1210 pset_len = rounddown(len, width); in edma_prep_dma_memcpy()
1211 /* One slot is enough for lengths that are a multiple of (SZ_32K - 1) */ in edma_prep_dma_memcpy()
1212 if (unlikely(pset_len == len)) in edma_prep_dma_memcpy()
1222 edesc->pset_nr = nslots; in edma_prep_dma_memcpy()
1223 edesc->residue = edesc->residue_stat = len; in edma_prep_dma_memcpy()
1224 edesc->direction = DMA_MEM_TO_MEM; in edma_prep_dma_memcpy()
1225 edesc->echan = echan; in edma_prep_dma_memcpy()
1227 ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1, in edma_prep_dma_memcpy()
1234 edesc->absync = ret; in edma_prep_dma_memcpy()
1236 edesc->pset[0].param.opt |= ITCCHEN; in edma_prep_dma_memcpy()
1240 edesc->pset[0].param.opt |= TCINTEN; in edma_prep_dma_memcpy()
1243 edesc->pset[0].param.opt |= TCCHEN; in edma_prep_dma_memcpy()
1245 if (echan->slot[1] < 0) { in edma_prep_dma_memcpy()
1246 echan->slot[1] = edma_alloc_slot(echan->ecc, in edma_prep_dma_memcpy()
1248 if (echan->slot[1] < 0) { in edma_prep_dma_memcpy()
1257 pset_len = width = len % array_size; in edma_prep_dma_memcpy()
1259 ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1, in edma_prep_dma_memcpy()
1266 edesc->pset[1].param.opt |= ITCCHEN; in edma_prep_dma_memcpy()
1269 edesc->pset[1].param.opt |= TCINTEN; in edma_prep_dma_memcpy()
1273 edesc->polled = true; in edma_prep_dma_memcpy()
1275 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); in edma_prep_dma_memcpy()
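The memcpy slot-splitting above (array size chosen by alignment, then rounddown for slot 1 and a remainder slot 2) can be checked standalone. A sketch with a hypothetical length; the alignment switch mirrors the __ffs() selection of SZ_32K - 1/2/4:

    #include <stdio.h>

    #define SZ_32K 0x8000
    #define SZ_64K 0x10000

    static unsigned long rounddown_to(unsigned long x, unsigned long m)
    {
        return x - (x % m);
    }

    int main(void)
    {
        unsigned long src = 0, dest = 0, len = 200000; /* hypothetical */
        unsigned long array_size, width, pset_len;
        int nslots;

        /* Pick the widest array that keeps src, dest and len aligned,
         * echoing the __ffs() switch above. */
        switch ((src | dest | len) & 3) {
        case 0:  array_size = SZ_32K - 4; break; /* 4-byte aligned */
        case 2:  array_size = SZ_32K - 2; break; /* 2-byte aligned */
        default: array_size = SZ_32K - 1; break; /* byte aligned */
        }

        if (len < SZ_64K) {
            width = pset_len = len;  /* one slot, one burst */
            nslots = 1;
        } else {
            width = array_size;
            pset_len = rounddown_to(len, width);
            nslots = (pset_len == len) ? 1 : 2;
        }
        printf("slot1 carries %lu bytes, remainder %lu, nslots=%d\n",
               pset_len, len - pset_len, nslots);
        return 0;
    }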
1283 struct device *dev = chan->device->dev; in edma_prep_dma_interleaved()
1291 if (is_slave_direction(xt->dir)) in edma_prep_dma_interleaved()
1294 if (xt->frame_size != 1 || xt->numf == 0) in edma_prep_dma_interleaved()
1297 if (xt->sgl[0].size > SZ_64K || xt->numf > SZ_64K) in edma_prep_dma_interleaved()
1300 src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]); in edma_prep_dma_interleaved()
1302 src_bidx = src_icg + xt->sgl[0].size; in edma_prep_dma_interleaved()
1303 } else if (xt->src_inc) { in edma_prep_dma_interleaved()
1304 src_bidx = xt->sgl[0].size; in edma_prep_dma_interleaved()
1311 dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]); in edma_prep_dma_interleaved()
1313 dst_bidx = dst_icg + xt->sgl[0].size; in edma_prep_dma_interleaved()
1314 } else if (xt->dst_inc) { in edma_prep_dma_interleaved()
1315 dst_bidx = xt->sgl[0].size; in edma_prep_dma_interleaved()
1329 edesc->direction = DMA_MEM_TO_MEM; in edma_prep_dma_interleaved()
1330 edesc->echan = echan; in edma_prep_dma_interleaved()
1331 edesc->pset_nr = 1; in edma_prep_dma_interleaved()
1333 param = &edesc->pset[0].param; in edma_prep_dma_interleaved()
1335 param->src = xt->src_start; in edma_prep_dma_interleaved()
1336 param->dst = xt->dst_start; in edma_prep_dma_interleaved()
1337 param->a_b_cnt = xt->numf << 16 | xt->sgl[0].size; in edma_prep_dma_interleaved()
1338 param->ccnt = 1; in edma_prep_dma_interleaved()
1339 param->src_dst_bidx = (dst_bidx << 16) | src_bidx; in edma_prep_dma_interleaved()
1340 param->src_dst_cidx = 0; in edma_prep_dma_interleaved()
1342 param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); in edma_prep_dma_interleaved()
1343 param->opt |= ITCCHEN; in edma_prep_dma_interleaved()
1346 param->opt |= TCINTEN; in edma_prep_dma_interleaved()
1348 edesc->polled = true; in edma_prep_dma_interleaved()
1350 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); in edma_prep_dma_interleaved()
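The B-index derivation above reduces to three cases: a nonzero inter-chunk gap gives icg + size, a plain incrementing address gives size, and a fixed address gives 0. A sketch with a hypothetical chunk type standing in for the dmaengine interleaved template:

    #include <stdio.h>

    /* Hypothetical stand-in for one dmaengine interleaved chunk. */
    struct chunk { unsigned int size, icg; int inc; };

    /* B-index is the byte stride between consecutive arrays. */
    static unsigned int bidx(const struct chunk *c)
    {
        if (c->icg)
            return c->icg + c->size; /* skip the inter-chunk gap */
        if (c->inc)
            return c->size;          /* back-to-back arrays */
        return 0;                    /* fixed (FIFO-style) address */
    }

    int main(void)
    {
        struct chunk src = { .size = 64, .icg = 16, .inc = 1 };
        struct chunk dst = { .size = 64, .icg = 0,  .inc = 1 };

        printf("src_bidx=%u dst_bidx=%u\n", bidx(&src), bidx(&dst));
        return 0;
    }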
1359 struct device *dev = chan->device->dev; in edma_prep_dma_cyclic()
1364 u32 burst; in edma_prep_dma_cyclic() local
1371 src_addr = echan->cfg.src_addr; in edma_prep_dma_cyclic()
1373 dev_width = echan->cfg.src_addr_width; in edma_prep_dma_cyclic()
1374 burst = echan->cfg.src_maxburst; in edma_prep_dma_cyclic()
1377 dst_addr = echan->cfg.dst_addr; in edma_prep_dma_cyclic()
1378 dev_width = echan->cfg.dst_addr_width; in edma_prep_dma_cyclic()
1379 burst = echan->cfg.dst_maxburst; in edma_prep_dma_cyclic()
1407 * If the burst and period sizes are the same, we can put in edma_prep_dma_cyclic()
1410 * after each burst, which is also after each desired period. in edma_prep_dma_cyclic()
1412 if (burst == period_len) { in edma_prep_dma_cyclic()
1425 edesc->cyclic = 1; in edma_prep_dma_cyclic()
1426 edesc->pset_nr = nslots; in edma_prep_dma_cyclic()
1427 edesc->residue = edesc->residue_stat = buf_len; in edma_prep_dma_cyclic()
1428 edesc->direction = direction; in edma_prep_dma_cyclic()
1429 edesc->echan = echan; in edma_prep_dma_cyclic()
1432 __func__, echan->ch_num, nslots, period_len, buf_len); in edma_prep_dma_cyclic()
1436 if (echan->slot[i] < 0) { in edma_prep_dma_cyclic()
1437 echan->slot[i] = in edma_prep_dma_cyclic()
1438 edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY); in edma_prep_dma_cyclic()
1439 if (echan->slot[i] < 0) { in edma_prep_dma_cyclic()
1447 if (i == nslots - 1) { in edma_prep_dma_cyclic()
1448 memcpy(&edesc->pset[i], &edesc->pset[0], in edma_prep_dma_cyclic()
1449 sizeof(edesc->pset[0])); in edma_prep_dma_cyclic()
1453 ret = edma_config_pset(chan, &edesc->pset[i], src_addr, in edma_prep_dma_cyclic()
1454 dst_addr, burst, dev_width, period_len, in edma_prep_dma_cyclic()
1479 i, echan->ch_num, echan->slot[i], in edma_prep_dma_cyclic()
1480 edesc->pset[i].param.opt, in edma_prep_dma_cyclic()
1481 edesc->pset[i].param.src, in edma_prep_dma_cyclic()
1482 edesc->pset[i].param.dst, in edma_prep_dma_cyclic()
1483 edesc->pset[i].param.a_b_cnt, in edma_prep_dma_cyclic()
1484 edesc->pset[i].param.ccnt, in edma_prep_dma_cyclic()
1485 edesc->pset[i].param.src_dst_bidx, in edma_prep_dma_cyclic()
1486 edesc->pset[i].param.src_dst_cidx, in edma_prep_dma_cyclic()
1487 edesc->pset[i].param.link_bcntrld); in edma_prep_dma_cyclic()
1489 edesc->absync = ret; in edma_prep_dma_cyclic()
1495 edesc->pset[i].param.opt |= TCINTEN; in edma_prep_dma_cyclic()
1499 edesc->pset[i].param.opt |= ITCINTEN; in edma_prep_dma_cyclic()
1504 if (!echan->tc) in edma_prep_dma_cyclic()
1507 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); in edma_prep_dma_cyclic()
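The comment above describes the fallback when a cyclic buffer would need more period slots than the driver's 20-segment cap (the "Max of 20 segments" comment earlier in this listing): if one burst covers a whole period, the buffer collapses into a single self-linked period with intermediate interrupts. A sketch of that arithmetic, reconstructed from context and to be read as illustrative:

    #include <stdio.h>

    #define MAX_NR_SG 20 /* per the "Max of 20 segments" comment */

    int main(void)
    {
        unsigned int buf_len = 65536, period_len = 1024; /* hypothetical */
        unsigned int burst = 1024;  /* bytes per burst, == period here */
        unsigned int nslots = buf_len / period_len + 1;  /* 65 */

        if (nslots > MAX_NR_SG) {
            if (burst == period_len) {
                /* One burst per period: a single full-buffer period
                 * with intermediate interrupts fires once per burst,
                 * i.e. once per desired period. */
                period_len = buf_len;
                nslots = 2;
            } else {
                printf("too many periods, prep would fail\n");
                return 1;
            }
        }
        printf("period_len=%u nslots=%u\n", period_len, nslots);
        return 0;
    }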
1512 struct device *dev = echan->vchan.chan.device->dev; in edma_completion_handler()
1515 spin_lock(&echan->vchan.lock); in edma_completion_handler()
1516 edesc = echan->edesc; in edma_completion_handler()
1518 if (edesc->cyclic) { in edma_completion_handler()
1519 vchan_cyclic_callback(&edesc->vdesc); in edma_completion_handler()
1520 spin_unlock(&echan->vchan.lock); in edma_completion_handler()
1522 } else if (edesc->processed == edesc->pset_nr) { in edma_completion_handler()
1523 edesc->residue = 0; in edma_completion_handler()
1525 vchan_cookie_complete(&edesc->vdesc); in edma_completion_handler()
1526 echan->edesc = NULL; in edma_completion_handler()
1529 echan->ch_num); in edma_completion_handler()
1532 echan->ch_num); in edma_completion_handler()
1537 edesc->residue -= edesc->sg_len; in edma_completion_handler()
1538 edesc->residue_stat = edesc->residue; in edma_completion_handler()
1539 edesc->processed_stat = edesc->processed; in edma_completion_handler()
1544 spin_unlock(&echan->vchan.lock); in edma_completion_handler()
1556 ctlr = ecc->id; in dma_irq_handler()
1560 dev_vdbg(ecc->dev, "dma_irq_handler\n"); in dma_irq_handler()
1585 edma_completion_handler(&ecc->slave_chans[channel]); in dma_irq_handler()
1595 struct edma_cc *ecc = echan->ecc; in edma_error_handler()
1596 struct device *dev = echan->vchan.chan.device->dev; in edma_error_handler()
1600 if (!echan->edesc) in edma_error_handler()
1603 spin_lock(&echan->vchan.lock); in edma_error_handler()
1605 err = edma_read_slot(ecc, echan->slot[0], &p); in edma_error_handler()
1621 echan->missed = 1; in edma_error_handler()
1633 spin_unlock(&echan->vchan.lock); in edma_error_handler()
1655 ctlr = ecc->id; in dma_ccerr_handler()
1659 dev_vdbg(ecc->dev, "dma_ccerr_handler\n"); in dma_ccerr_handler()
1665 * Ask eDMA to re-evaluate the error registers. in dma_ccerr_handler()
1667 dev_err(ecc->dev, "%s: Error interrupt without error event!\n", in dma_ccerr_handler()
1682 dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val); in dma_ccerr_handler()
1693 edma_error_handler(&ecc->slave_chans[k]); in dma_ccerr_handler()
1699 dev_dbg(ecc->dev, "QEMR 0x%02x\n", val); in dma_ccerr_handler()
1707 dev_warn(ecc->dev, "CCERR 0x%08x\n", val); in dma_ccerr_handler()
1726 struct edma_cc *ecc = echan->ecc; in edma_alloc_chan_resources()
1727 struct device *dev = ecc->dev; in edma_alloc_chan_resources()
1731 if (echan->tc) { in edma_alloc_chan_resources()
1732 eventq_no = echan->tc->id; in edma_alloc_chan_resources()
1733 } else if (ecc->tc_list) { in edma_alloc_chan_resources()
1735 echan->tc = &ecc->tc_list[ecc->info->default_queue]; in edma_alloc_chan_resources()
1736 eventq_no = echan->tc->id; in edma_alloc_chan_resources()
1743 echan->slot[0] = edma_alloc_slot(ecc, echan->ch_num); in edma_alloc_chan_resources()
1744 if (echan->slot[0] < 0) { in edma_alloc_chan_resources()
1746 EDMA_CHAN_SLOT(echan->ch_num)); in edma_alloc_chan_resources()
1747 ret = echan->slot[0]; in edma_alloc_chan_resources()
1751 /* Set up channel -> slot mapping for the entry slot */ in edma_alloc_chan_resources()
1752 edma_set_chmap(echan, echan->slot[0]); in edma_alloc_chan_resources()
1753 echan->alloced = true; in edma_alloc_chan_resources()
1756 EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id, in edma_alloc_chan_resources()
1757 echan->hw_triggered ? "HW" : "SW"); in edma_alloc_chan_resources()
1770 struct device *dev = echan->ecc->dev; in edma_free_chan_resources()
1776 vchan_free_chan_resources(&echan->vchan); in edma_free_chan_resources()
1780 if (echan->slot[i] >= 0) { in edma_free_chan_resources()
1781 edma_free_slot(echan->ecc, echan->slot[i]); in edma_free_chan_resources()
1782 echan->slot[i] = -1; in edma_free_chan_resources()
1787 edma_set_chmap(echan, echan->ecc->dummy_slot); in edma_free_chan_resources()
1790 if (echan->alloced) { in edma_free_chan_resources()
1792 echan->alloced = false; in edma_free_chan_resources()
1795 echan->tc = NULL; in edma_free_chan_resources()
1796 echan->hw_triggered = false; in edma_free_chan_resources()
1799 EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id); in edma_free_chan_resources()
1808 spin_lock_irqsave(&echan->vchan.lock, flags); in edma_issue_pending()
1809 if (vchan_issue_pending(&echan->vchan) && !echan->edesc) in edma_issue_pending()
1811 spin_unlock_irqrestore(&echan->vchan.lock, flags); in edma_issue_pending()
1819 * RX-FIFO, as many as 55 loops have been seen.
1825 bool dst = edesc->direction == DMA_DEV_TO_MEM; in edma_residue()
1827 struct edma_chan *echan = edesc->echan; in edma_residue()
1828 struct edma_pset *pset = edesc->pset; in edma_residue()
1830 int channel = EDMA_CHAN_SLOT(echan->ch_num); in edma_residue()
1840 pos = edma_get_position(echan->ecc, echan->slot[0], dst); in edma_residue()
1850 if (is_slave_direction(edesc->direction)) in edma_residue()
1856 while (edma_shadow0_read_array(echan->ecc, event_reg, idx) & ch_bit) { in edma_residue()
1857 pos = edma_get_position(echan->ecc, echan->slot[0], dst); in edma_residue()
1861 if (!--loop_count) { in edma_residue()
1862 dev_dbg_ratelimited(echan->vchan.chan.device->dev, in edma_residue()
1874 * We never update edesc->residue in the cyclic case, so we in edma_residue()
1878 if (edesc->cyclic) { in edma_residue()
1879 done = pos - pset->addr; in edma_residue()
1880 edesc->residue_stat = edesc->residue - done; in edma_residue()
1881 return edesc->residue_stat; in edma_residue()
1894 pset += edesc->processed_stat; in edma_residue()
1896 for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) { in edma_residue()
1902 if (pos >= pset->addr && pos < pset->addr + pset->len) in edma_residue()
1903 return edesc->residue_stat - (pos - pset->addr); in edma_residue()
1906 edesc->processed_stat++; in edma_residue()
1907 edesc->residue_stat -= pset->len; in edma_residue()
1909 return edesc->residue_stat; in edma_residue()
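The residue read above re-samples the position while the event is still pending, bounded by a loop budget (the comment notes as many as 55 iterations on a slow RX-FIFO). A toy model with simulated hardware accessors; read_pos(), event_pending() and LOOP_LIMIT are stand-ins, not driver names:

    #include <stdio.h>

    #define LOOP_LIMIT 1000 /* hypothetical retry budget */

    /* Stand-ins for the hardware accessors: the position creeps
     * forward while a (simulated) event stays pending. */
    static unsigned int hw_pos;
    static int pending_reads = 5;

    static unsigned int read_pos(void) { return hw_pos += 16; }
    static int event_pending(void) { return pending_reads-- > 0; }

    /* While the event is pending the position register may still
     * move, so re-read until it settles or the budget runs out. */
    static unsigned int stable_position(void)
    {
        unsigned int pos = read_pos();
        int loops = LOOP_LIMIT;

        while (event_pending()) {
            pos = read_pos();
            if (!--loops)
                break; /* give up; use the last value read */
        }
        return pos;
    }

    int main(void)
    {
        printf("settled position: %u\n", stable_position());
        return 0;
    }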
1931 spin_lock_irqsave(&echan->vchan.lock, flags); in edma_tx_status()
1932 if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) { in edma_tx_status()
1933 txstate->residue = edma_residue(echan->edesc); in edma_tx_status()
1935 struct virt_dma_desc *vdesc = vchan_find_desc(&echan->vchan, in edma_tx_status()
1939 txstate->residue = to_edma_desc(&vdesc->tx)->residue; in edma_tx_status()
1941 txstate->residue = 0; in edma_tx_status()
1948 if (ret != DMA_COMPLETE && !txstate->residue && in edma_tx_status()
1949 echan->edesc && echan->edesc->polled && in edma_tx_status()
1950 echan->edesc->vdesc.tx.cookie == cookie) { in edma_tx_status()
1952 vchan_cookie_complete(&echan->edesc->vdesc); in edma_tx_status()
1953 echan->edesc = NULL; in edma_tx_status()
1958 spin_unlock_irqrestore(&echan->vchan.lock, flags); in edma_tx_status()
1967 while (*memcpy_channels != -1) { in edma_is_memcpy_channel()
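The memcpy channel list is a -1 terminated array, as the loop above shows. A minimal standalone scan; is_memcpy_channel() here is a simplified version of the driver's edma_is_memcpy_channel():

    #include <stdio.h>

    static int is_memcpy_channel(int ch_num, const int *memcpy_channels)
    {
        if (!memcpy_channels)
            return 0;
        while (*memcpy_channels != -1) { /* -1 terminates the list */
            if (*memcpy_channels == ch_num)
                return 1;
            memcpy_channels++;
        }
        return 0;
    }

    int main(void)
    {
        int channels[] = { 12, 13, -1 }; /* hypothetical list */

        printf("%d %d\n", is_memcpy_channel(12, channels),
               is_memcpy_channel(5, channels));
        return 0;
    }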
1982 struct dma_device *s_ddev = &ecc->dma_slave; in edma_dma_init()
1984 s32 *memcpy_channels = ecc->info->memcpy_channels; in edma_dma_init()
1987 dma_cap_zero(s_ddev->cap_mask); in edma_dma_init()
1988 dma_cap_set(DMA_SLAVE, s_ddev->cap_mask); in edma_dma_init()
1989 dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask); in edma_dma_init()
1990 if (ecc->legacy_mode && !memcpy_channels) { in edma_dma_init()
1991 dev_warn(ecc->dev, in edma_dma_init()
1994 dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask); in edma_dma_init()
1995 dma_cap_set(DMA_INTERLEAVE, s_ddev->cap_mask); in edma_dma_init()
1996 s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy; in edma_dma_init()
1997 s_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved; in edma_dma_init()
1998 s_ddev->directions = BIT(DMA_MEM_TO_MEM); in edma_dma_init()
2001 s_ddev->device_prep_slave_sg = edma_prep_slave_sg; in edma_dma_init()
2002 s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic; in edma_dma_init()
2003 s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources; in edma_dma_init()
2004 s_ddev->device_free_chan_resources = edma_free_chan_resources; in edma_dma_init()
2005 s_ddev->device_issue_pending = edma_issue_pending; in edma_dma_init()
2006 s_ddev->device_tx_status = edma_tx_status; in edma_dma_init()
2007 s_ddev->device_config = edma_slave_config; in edma_dma_init()
2008 s_ddev->device_pause = edma_dma_pause; in edma_dma_init()
2009 s_ddev->device_resume = edma_dma_resume; in edma_dma_init()
2010 s_ddev->device_terminate_all = edma_terminate_all; in edma_dma_init()
2011 s_ddev->device_synchronize = edma_synchronize; in edma_dma_init()
2013 s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS; in edma_dma_init()
2014 s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS; in edma_dma_init()
2015 s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV)); in edma_dma_init()
2016 s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; in edma_dma_init()
2017 s_ddev->max_burst = SZ_32K - 1; /* CIDX: 16bit signed */ in edma_dma_init()
2019 s_ddev->dev = ecc->dev; in edma_dma_init()
2020 INIT_LIST_HEAD(&s_ddev->channels); in edma_dma_init()
2023 m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL); in edma_dma_init()
2025 dev_warn(ecc->dev, "memcpy is disabled due to OoM\n"); in edma_dma_init()
2029 ecc->dma_memcpy = m_ddev; in edma_dma_init()
2031 dma_cap_zero(m_ddev->cap_mask); in edma_dma_init()
2032 dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask); in edma_dma_init()
2033 dma_cap_set(DMA_INTERLEAVE, m_ddev->cap_mask); in edma_dma_init()
2035 m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy; in edma_dma_init()
2036 m_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved; in edma_dma_init()
2037 m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources; in edma_dma_init()
2038 m_ddev->device_free_chan_resources = edma_free_chan_resources; in edma_dma_init()
2039 m_ddev->device_issue_pending = edma_issue_pending; in edma_dma_init()
2040 m_ddev->device_tx_status = edma_tx_status; in edma_dma_init()
2041 m_ddev->device_config = edma_slave_config; in edma_dma_init()
2042 m_ddev->device_pause = edma_dma_pause; in edma_dma_init()
2043 m_ddev->device_resume = edma_dma_resume; in edma_dma_init()
2044 m_ddev->device_terminate_all = edma_terminate_all; in edma_dma_init()
2045 m_ddev->device_synchronize = edma_synchronize; in edma_dma_init()
2047 m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS; in edma_dma_init()
2048 m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS; in edma_dma_init()
2049 m_ddev->directions = BIT(DMA_MEM_TO_MEM); in edma_dma_init()
2050 m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; in edma_dma_init()
2052 m_ddev->dev = ecc->dev; in edma_dma_init()
2053 INIT_LIST_HEAD(&m_ddev->channels); in edma_dma_init()
2054 } else if (!ecc->legacy_mode) { in edma_dma_init()
2055 dev_info(ecc->dev, "memcpy is disabled\n"); in edma_dma_init()
2059 for (i = 0; i < ecc->num_channels; i++) { in edma_dma_init()
2060 struct edma_chan *echan = &ecc->slave_chans[i]; in edma_dma_init()
2061 echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i); in edma_dma_init()
2062 echan->ecc = ecc; in edma_dma_init()
2063 echan->vchan.desc_free = edma_desc_free; in edma_dma_init()
2066 vchan_init(&echan->vchan, m_ddev); in edma_dma_init()
2068 vchan_init(&echan->vchan, s_ddev); in edma_dma_init()
2070 INIT_LIST_HEAD(&echan->node); in edma_dma_init()
2072 echan->slot[j] = -1; in edma_dma_init()
2087 ecc->num_region = BIT(value); in edma_setup_from_hw()
2090 ecc->num_channels = BIT(value + 1); in edma_setup_from_hw()
2093 ecc->num_qchannels = value * 2; in edma_setup_from_hw()
2096 ecc->num_slots = BIT(value + 4); in edma_setup_from_hw()
2099 ecc->num_tc = value + 1; in edma_setup_from_hw()
2101 ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false; in edma_setup_from_hw()
2104 dev_dbg(dev, "num_region: %u\n", ecc->num_region); in edma_setup_from_hw()
2105 dev_dbg(dev, "num_channels: %u\n", ecc->num_channels); in edma_setup_from_hw()
2106 dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels); in edma_setup_from_hw()
2107 dev_dbg(dev, "num_slots: %u\n", ecc->num_slots); in edma_setup_from_hw()
2108 dev_dbg(dev, "num_tc: %u\n", ecc->num_tc); in edma_setup_from_hw()
2109 dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no"); in edma_setup_from_hw()
2112 if (pdata->queue_priority_mapping) in edma_setup_from_hw()
2117 * Q0 - priority 0 in edma_setup_from_hw()
2118 * Q1 - priority 1 in edma_setup_from_hw()
2119 * Q2 - priority 2 in edma_setup_from_hw()
2125 queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8), in edma_setup_from_hw()
2128 return -ENOMEM; in edma_setup_from_hw()
2130 for (i = 0; i < ecc->num_tc; i++) { in edma_setup_from_hw()
2134 queue_priority_map[i][0] = -1; in edma_setup_from_hw()
2135 queue_priority_map[i][1] = -1; in edma_setup_from_hw()
2137 pdata->queue_priority_mapping = queue_priority_map; in edma_setup_from_hw()
2139 pdata->default_queue = i - 1; in edma_setup_from_hw()
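When the platform supplies no mapping, the code above builds a default table where queue n gets priority n, terminated by a -1/-1 row, and then picks the last queue as the default. A sketch with a hypothetical num_tc of 3:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        int num_tc = 3, i;
        signed char (*map)[2] = calloc(num_tc + 1, sizeof(*map));

        if (!map)
            return 1;
        for (i = 0; i < num_tc; i++) {
            map[i][0] = i; /* queue number: Qn ... */
            map[i][1] = i; /* ... gets priority n */
        }
        map[i][0] = -1;    /* sentinel row */
        map[i][1] = -1;

        /* The driver then picks the last queue: default_queue = i - 1 */
        printf("default_queue=%d\n", i - 1);
        free(map);
        return 0;
    }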
2148 const char pname[] = "ti,edma-xbar-event-map"; in edma_xbar_event_map()
2158 return -ENOMEM; in edma_xbar_event_map()
2160 ret = of_address_to_resource(dev->of_node, 1, &res); in edma_xbar_event_map()
2162 return -ENOMEM; in edma_xbar_event_map()
2166 return -ENOMEM; in edma_xbar_event_map()
2168 ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans, in edma_xbar_event_map()
2171 return -EIO; in edma_xbar_event_map()
2175 xbar_chans[nelm][0] = -1; in edma_xbar_event_map()
2176 xbar_chans[nelm][1] = -1; in edma_xbar_event_map()
2187 pdata->xbar_chans = (const s16 (*)[2]) xbar_chans; in edma_xbar_event_map()
2200 return ERR_PTR(-ENOMEM); in edma_setup_info_from_dt()
2203 prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map", in edma_setup_info_from_dt()
2214 prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz); in edma_setup_info_from_dt()
2216 const char pname[] = "ti,edma-memcpy-channels"; in edma_setup_info_from_dt()
2223 return ERR_PTR(-ENOMEM); in edma_setup_info_from_dt()
2225 ret = of_property_read_u32_array(dev->of_node, pname, in edma_setup_info_from_dt()
2230 memcpy_ch[nelm] = -1; in edma_setup_info_from_dt()
2231 info->memcpy_channels = memcpy_ch; in edma_setup_info_from_dt()
2234 prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges", in edma_setup_info_from_dt()
2237 const char pname[] = "ti,edma-reserved-slot-ranges"; in edma_setup_info_from_dt()
2249 return ERR_PTR(-ENOMEM); in edma_setup_info_from_dt()
2254 return ERR_PTR(-ENOMEM); in edma_setup_info_from_dt()
2261 return ERR_PTR(-ENOMEM); in edma_setup_info_from_dt()
2264 ret = of_property_read_u32_array(dev->of_node, pname, in edma_setup_info_from_dt()
2275 rsv_slots[nelm][0] = -1; in edma_setup_info_from_dt()
2276 rsv_slots[nelm][1] = -1; in edma_setup_info_from_dt()
2278 info->rsv = rsv_info; in edma_setup_info_from_dt()
2279 info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots; in edma_setup_info_from_dt()
2290 struct edma_cc *ecc = ofdma->of_dma_data; in of_edma_xlate()
2295 if (!ecc || dma_spec->args_count < 1) in of_edma_xlate()
2298 for (i = 0; i < ecc->num_channels; i++) { in of_edma_xlate()
2299 echan = &ecc->slave_chans[i]; in of_edma_xlate()
2300 if (echan->ch_num == dma_spec->args[0]) { in of_edma_xlate()
2301 chan = &echan->vchan.chan; in of_edma_xlate()
2309 if (echan->ecc->legacy_mode && dma_spec->args_count == 1) in of_edma_xlate()
2312 if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 && in of_edma_xlate()
2313 dma_spec->args[1] < echan->ecc->num_tc) { in of_edma_xlate()
2314 echan->tc = &echan->ecc->tc_list[dma_spec->args[1]]; in of_edma_xlate()
2321 echan->hw_triggered = true; in of_edma_xlate()
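The translate function above accepts one argument (the channel) in legacy mode and an optional second argument selecting the TC otherwise. A sketch of just that argument handling, with hypothetical values in place of a real dma-spec:

    #include <stdio.h>

    int main(void)
    {
        int legacy_mode = 0, num_tc = 4;      /* hypothetical setup */
        int args[] = { 12, 2 };  /* <channel> [<tc>] from the dma-spec */
        int args_count = 2;

        if (legacy_mode && args_count == 1)
            printf("ch %d, default TC\n", args[0]);
        else if (!legacy_mode && args_count == 2 && args[1] < num_tc)
            printf("ch %d on TC %d\n", args[0], args[1]);
        else
            printf("invalid dma-spec\n");
        return 0;
    }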
2328 return ERR_PTR(-EINVAL); in edma_setup_info_from_dt()
2342 struct edma_soc_info *info = pdev->dev.platform_data; in edma_probe()
2348 struct device_node *node = pdev->dev.of_node; in edma_probe()
2349 struct device *dev = &pdev->dev; in edma_probe()
2358 if (match && (*(u32 *)match->data) == EDMA_BINDING_TPCC) in edma_probe()
2369 return -ENODEV; in edma_probe()
2377 return -ENOMEM; in edma_probe()
2379 ecc->dev = dev; in edma_probe()
2380 ecc->id = pdev->id; in edma_probe()
2381 ecc->legacy_mode = legacy_mode; in edma_probe()
2382 /* When booting with DT the pdev->id is -1 */ in edma_probe()
2383 if (ecc->id < 0) in edma_probe()
2384 ecc->id = 0; in edma_probe()
2392 return -ENODEV; in edma_probe()
2395 ecc->base = devm_ioremap_resource(dev, mem); in edma_probe()
2396 if (IS_ERR(ecc->base)) in edma_probe()
2397 return PTR_ERR(ecc->base); in edma_probe()
2415 ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels, in edma_probe()
2416 sizeof(*ecc->slave_chans), GFP_KERNEL); in edma_probe()
2418 ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots), in edma_probe()
2421 ecc->channels_mask = devm_kcalloc(dev, in edma_probe()
2422 BITS_TO_LONGS(ecc->num_channels), in edma_probe()
2424 if (!ecc->slave_chans || !ecc->slot_inuse || !ecc->channels_mask) { in edma_probe()
2425 ret = -ENOMEM; in edma_probe()
2430 bitmap_fill(ecc->channels_mask, ecc->num_channels); in edma_probe()
2432 ecc->default_queue = info->default_queue; in edma_probe()
2434 if (info->rsv) { in edma_probe()
2436 reserved = info->rsv->rsv_slots; in edma_probe()
2438 for (i = 0; reserved[i][0] != -1; i++) in edma_probe()
2439 bitmap_set(ecc->slot_inuse, reserved[i][0], in edma_probe()
2444 reserved = info->rsv->rsv_chans; in edma_probe()
2446 for (i = 0; reserved[i][0] != -1; i++) in edma_probe()
2447 bitmap_clear(ecc->channels_mask, reserved[i][0], in edma_probe()
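Reserved resources arrive as {start, count} pairs terminated by -1, marked into the in-use bitmaps by the probe loops above. A toy version using a byte array in place of the slot bitmap:

    #include <stdio.h>
    #include <string.h>

    #define NUM_SLOTS 64

    int main(void)
    {
        unsigned char slot_inuse[NUM_SLOTS];
        static const short reserved[][2] = {
            { 4, 2 }, { 10, 4 }, { -1, -1 } /* hypothetical ranges */
        };
        int i, j, used = 0;

        memset(slot_inuse, 0, sizeof(slot_inuse));
        /* Each {start, count} pair reserves a run of PaRAM slots. */
        for (i = 0; reserved[i][0] != -1; i++)
            for (j = 0; j < reserved[i][1]; j++)
                slot_inuse[reserved[i][0] + j] = 1;

        for (i = 0; i < NUM_SLOTS; i++)
            used += slot_inuse[i];
        printf("%d slots reserved\n", used); /* 6 */
        return 0;
    }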
2452 for (i = 0; i < ecc->num_slots; i++) { in edma_probe()
2453 /* Reset only unused - not reserved - paRAM slots */ in edma_probe()
2454 if (!test_bit(i, ecc->slot_inuse)) in edma_probe()
2468 dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret); in edma_probe()
2471 ecc->ccint = irq; in edma_probe()
2484 dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret); in edma_probe()
2487 ecc->ccerrint = irq; in edma_probe()
2490 ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY); in edma_probe()
2491 if (ecc->dummy_slot < 0) { in edma_probe()
2493 ret = ecc->dummy_slot; in edma_probe()
2497 queue_priority_mapping = info->queue_priority_mapping; in edma_probe()
2499 if (!ecc->legacy_mode) { in edma_probe()
2504 ecc->tc_list = devm_kcalloc(dev, ecc->num_tc, in edma_probe()
2505 sizeof(*ecc->tc_list), GFP_KERNEL); in edma_probe()
2506 if (!ecc->tc_list) { in edma_probe()
2507 ret = -ENOMEM; in edma_probe()
2514 if (ret || i == ecc->num_tc) in edma_probe()
2517 ecc->tc_list[i].node = tc_args.np; in edma_probe()
2518 ecc->tc_list[i].id = i; in edma_probe()
2522 info->default_queue = i; in edma_probe()
2526 /* See if we have optional dma-channel-mask array */ in edma_probe()
2527 array_max = DIV_ROUND_UP(ecc->num_channels, BITS_PER_TYPE(u32)); in edma_probe()
2529 "dma-channel-mask", in edma_probe()
2530 (u32 *)ecc->channels_mask, in edma_probe()
2533 dev_warn(dev, "dma-channel-mask is not complete.\n"); in edma_probe()
2534 else if (ret == -EOVERFLOW || ret == -ENODATA) in edma_probe()
2536 "dma-channel-mask is out of range or empty\n"); in edma_probe()
2540 for (i = 0; queue_priority_mapping[i][0] != -1; i++) in edma_probe()
2548 ecc->info = info; in edma_probe()
2553 for (i = 0; i < ecc->num_channels; i++) { in edma_probe()
2555 if (!test_bit(i, ecc->channels_mask)) in edma_probe()
2559 edma_assign_channel_eventq(&ecc->slave_chans[i], in edma_probe()
2560 info->default_queue); in edma_probe()
2562 edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot); in edma_probe()
2565 ecc->dma_slave.filter.map = info->slave_map; in edma_probe()
2566 ecc->dma_slave.filter.mapcnt = info->slavecnt; in edma_probe()
2567 ecc->dma_slave.filter.fn = edma_filter_fn; in edma_probe()
2569 ret = dma_async_device_register(&ecc->dma_slave); in edma_probe()
2575 if (ecc->dma_memcpy) { in edma_probe()
2576 ret = dma_async_device_register(ecc->dma_memcpy); in edma_probe()
2580 dma_async_device_unregister(&ecc->dma_slave); in edma_probe()
2593 edma_free_slot(ecc, ecc->dummy_slot); in edma_probe()
2605 &dmadev->channels, vchan.chan.device_node) { in edma_cleanupp_vchan()
2606 list_del(&echan->vchan.chan.device_node); in edma_cleanupp_vchan()
2607 tasklet_kill(&echan->vchan.task); in edma_cleanupp_vchan()
2613 struct device *dev = &pdev->dev; in edma_remove()
2616 devm_free_irq(dev, ecc->ccint, ecc); in edma_remove()
2617 devm_free_irq(dev, ecc->ccerrint, ecc); in edma_remove()
2619 edma_cleanupp_vchan(&ecc->dma_slave); in edma_remove()
2621 if (dev->of_node) in edma_remove()
2622 of_dma_controller_free(dev->of_node); in edma_remove()
2623 dma_async_device_unregister(&ecc->dma_slave); in edma_remove()
2624 if (ecc->dma_memcpy) in edma_remove()
2625 dma_async_device_unregister(ecc->dma_memcpy); in edma_remove()
2626 edma_free_slot(ecc, ecc->dummy_slot); in edma_remove()
2637 struct edma_chan *echan = ecc->slave_chans; in edma_pm_suspend()
2640 for (i = 0; i < ecc->num_channels; i++) { in edma_pm_suspend()
2651 struct edma_chan *echan = ecc->slave_chans; in edma_pm_resume()
2656 edma_write_slot(ecc, ecc->dummy_slot, &dummy_paramset); in edma_pm_resume()
2658 queue_priority_mapping = ecc->info->queue_priority_mapping; in edma_pm_resume()
2661 for (i = 0; queue_priority_mapping[i][0] != -1; i++) in edma_pm_resume()
2665 for (i = 0; i < ecc->num_channels; i++) { in edma_pm_resume()
2674 /* Set up channel -> slot mapping for the entry slot */ in edma_pm_resume()
2699 pm_runtime_enable(&pdev->dev); in edma_tptc_probe()
2700 return pm_runtime_get_sync(&pdev->dev); in edma_tptc_probe()
2706 .name = "edma3-tptc",
2715 if (chan->device->dev->driver == &edma_driver.driver) { in edma_filter_fn()
2718 if (ch_req == echan->ch_num) { in edma_filter_fn()
2720 echan->hw_triggered = true; in edma_filter_fn()