Lines Matching +full:free +full:- +full:running
1 // SPDX-License-Identifier: GPL-2.0
3 * Renesas R-Car Gen2 DMA Controller Driver
11 #include <linux/dma-mapping.h>
28 * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
43 * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
56 * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
63 * @running: the transfer chunk being currently processed
80 struct rcar_dmac_xfer_chunk *running; member
97 * struct rcar_dmac_desc_page - One page worth of descriptors
112 ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) / \
115 ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \
119 * struct rcar_dmac_chan_slave - Slave configuration
129 * struct rcar_dmac_chan_map - Map of slave device phys to dma address
141 * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
150 * @desc.free: list of free descriptors
155 * @desc.running: the descriptor being processed (a member of the active list)
156 * @desc.chunks_free: list of free transfer chunk descriptors
173 struct list_head free; member
178 struct rcar_dmac_desc *running; member
189 * struct rcar_dmac - R-Car Gen2 DMA Controller
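The kerneldoc fragments above describe a three-level containment: the controller owns channels, each channel keeps descriptors on free/pending/active/done/wait lists, and each descriptor owns a list of transfer chunks (plus an optional hardware descriptor array). The standalone sketch below mirrors that shape with plain C structs; field names follow the driver but the layout and sizes are illustrative only, not the real definitions.

/* Standalone sketch of the hierarchy described above:
 * controller -> channel -> running descriptor -> transfer chunks.
 */
#include <stdio.h>

struct xfer_chunk {
	unsigned long long src_addr;
	unsigned long long dst_addr;
	unsigned int size;		/* bytes */
	struct xfer_chunk *next;	/* stand-in for the kernel list_head */
};

struct dma_desc {
	struct xfer_chunk *chunks;	/* chunks making up this transfer */
	struct xfer_chunk *running;	/* chunk currently being processed */
	unsigned int nchunks;
	unsigned int size;		/* overall transfer size in bytes */
};

struct dma_chan_state {
	struct dma_desc *running;	/* descriptor currently on the hardware */
	/* the real driver also keeps free/pending/active/done/wait lists here */
};

int main(void)
{
	struct xfer_chunk c1 = { 0xe6e60000, 0x40001000, 4096, 0 };
	struct xfer_chunk c0 = { 0xe6e60000, 0x40000000, 4096, &c1 };
	struct dma_desc desc = { &c0, &c0, 2, 8192 };
	struct dma_chan_state chan = { &desc };

	printf("running desc: %u chunks, %u bytes total, first chunk %u bytes\n",
	       chan.running->nchunks, chan.running->size,
	       chan.running->running->size);
	return 0;
}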
211 /* -----------------------------------------------------------------------------
290 /* -----------------------------------------------------------------------------
297 writew(data, dmac->iomem + reg); in rcar_dmac_write()
299 writel(data, dmac->iomem + reg); in rcar_dmac_write()
305 return readw(dmac->iomem + reg); in rcar_dmac_read()
307 return readl(dmac->iomem + reg); in rcar_dmac_read()
313 return readw(chan->iomem + reg); in rcar_dmac_chan_read()
315 return readl(chan->iomem + reg); in rcar_dmac_chan_read()
321 writew(data, chan->iomem + reg); in rcar_dmac_chan_write()
323 writel(data, chan->iomem + reg); in rcar_dmac_chan_write()
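The accessors above pick 16-bit (readw/writew) or 32-bit (readl/writel) MMIO accesses depending on the register offset; the exact condition is not visible in this listing, so the standalone sketch below uses a hypothetical is_16bit_reg() predicate and a byte buffer in place of the ioremapped window.

/* Minimal sketch of width-dependent register accessors. is_16bit_reg() and
 * the register offsets are placeholders, not the driver's actual test.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define REG_DMAOR	0x60	/* assumed 16-bit register offset */
#define REG_DMASAR	0x00	/* assumed 32-bit register offset */

static uint8_t iomem[0x100];	/* stand-in for the ioremapped channel window */

static int is_16bit_reg(unsigned int reg)
{
	return reg == REG_DMAOR;
}

static void chan_write(unsigned int reg, uint32_t data)
{
	if (is_16bit_reg(reg)) {
		uint16_t v = (uint16_t)data;
		memcpy(&iomem[reg], &v, sizeof(v));	/* writew() analogue */
	} else {
		memcpy(&iomem[reg], &data, sizeof(data));	/* writel() analogue */
	}
}

static uint32_t chan_read(unsigned int reg)
{
	if (is_16bit_reg(reg)) {
		uint16_t v;
		memcpy(&v, &iomem[reg], sizeof(v));
		return v;
	} else {
		uint32_t v;
		memcpy(&v, &iomem[reg], sizeof(v));
		return v;
	}
}

int main(void)
{
	chan_write(REG_DMAOR, 0x1234);
	chan_write(REG_DMASAR, 0xdeadbeef);
	printf("DMAOR=0x%x DMASAR=0x%x\n",
	       (unsigned)chan_read(REG_DMAOR), (unsigned)chan_read(REG_DMASAR));
	return 0;
}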
326 /* -----------------------------------------------------------------------------
339 struct rcar_dmac_desc *desc = chan->desc.running; in rcar_dmac_chan_start_xfer()
340 u32 chcr = desc->chcr; in rcar_dmac_chan_start_xfer()
344 if (chan->mid_rid >= 0) in rcar_dmac_chan_start_xfer()
345 rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid); in rcar_dmac_chan_start_xfer()
347 if (desc->hwdescs.use) { in rcar_dmac_chan_start_xfer()
349 list_first_entry(&desc->chunks, in rcar_dmac_chan_start_xfer()
352 dev_dbg(chan->chan.device->dev, in rcar_dmac_chan_start_xfer()
354 chan->index, desc, desc->nchunks, &desc->hwdescs.dma); in rcar_dmac_chan_start_xfer()
358 chunk->src_addr >> 32); in rcar_dmac_chan_start_xfer()
360 chunk->dst_addr >> 32); in rcar_dmac_chan_start_xfer()
362 desc->hwdescs.dma >> 32); in rcar_dmac_chan_start_xfer()
365 (desc->hwdescs.dma & 0xfffffff0) | in rcar_dmac_chan_start_xfer()
368 RCAR_DMACHCRB_DCNT(desc->nchunks - 1) | in rcar_dmac_chan_start_xfer()
379 chunk->dst_addr & 0xffffffff); in rcar_dmac_chan_start_xfer()
394 if (!desc->cyclic) in rcar_dmac_chan_start_xfer()
400 else if (desc->async_tx.callback) in rcar_dmac_chan_start_xfer()
409 struct rcar_dmac_xfer_chunk *chunk = desc->running; in rcar_dmac_chan_start_xfer()
411 dev_dbg(chan->chan.device->dev, in rcar_dmac_chan_start_xfer()
412 "chan%u: queue chunk %p: %u@%pad -> %pad\n", in rcar_dmac_chan_start_xfer()
413 chan->index, chunk, chunk->size, &chunk->src_addr, in rcar_dmac_chan_start_xfer()
414 &chunk->dst_addr); in rcar_dmac_chan_start_xfer()
418 chunk->src_addr >> 32); in rcar_dmac_chan_start_xfer()
420 chunk->dst_addr >> 32); in rcar_dmac_chan_start_xfer()
423 chunk->src_addr & 0xffffffff); in rcar_dmac_chan_start_xfer()
425 chunk->dst_addr & 0xffffffff); in rcar_dmac_chan_start_xfer()
427 chunk->size >> desc->xfer_shift); in rcar_dmac_chan_start_xfer()
441 rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0)); in rcar_dmac_init()
447 dev_warn(dmac->dev, "DMAOR initialization failed.\n"); in rcar_dmac_init()
448 return -EIO; in rcar_dmac_init()
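Line 441 clears every channel interrupt in one write by building a mask with one bit per channel, GENMASK(n_channels - 1, 0). The standalone sketch below reproduces only that mask arithmetic; the channel count used here is just an example.

/* Sketch of the "one bit per channel" DMACHCLR mask built above. genmask()
 * is a standalone analogue of GENMASK() from <linux/bits.h>.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t genmask(unsigned int h, unsigned int l)
{
	return (~0u >> (31 - h)) & (~0u << l);
}

int main(void)
{
	unsigned int n_channels = 15;	/* example channel count */

	/* 15 channels -> bits 14..0 set -> 0x00007fff */
	printf("DMACHCLR mask: 0x%08x\n", (unsigned)genmask(n_channels - 1, 0));
	return 0;
}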
454 /* -----------------------------------------------------------------------------
460 struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan); in rcar_dmac_tx_submit()
465 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_tx_submit()
469 dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n", in rcar_dmac_tx_submit()
470 chan->index, tx->cookie, desc); in rcar_dmac_tx_submit()
472 list_add_tail(&desc->node, &chan->desc.pending); in rcar_dmac_tx_submit()
473 desc->running = list_first_entry(&desc->chunks, in rcar_dmac_tx_submit()
476 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_tx_submit()
481 /* -----------------------------------------------------------------------------
482 * Descriptor allocation and freeing
486 * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
499 return -ENOMEM; in rcar_dmac_desc_alloc()
502 struct rcar_dmac_desc *desc = &page->descs[i]; in rcar_dmac_desc_alloc()
504 dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); in rcar_dmac_desc_alloc()
505 desc->async_tx.tx_submit = rcar_dmac_tx_submit; in rcar_dmac_desc_alloc()
506 INIT_LIST_HEAD(&desc->chunks); in rcar_dmac_desc_alloc()
508 list_add_tail(&desc->node, &list); in rcar_dmac_desc_alloc()
511 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_desc_alloc()
512 list_splice_tail(&list, &chan->desc.free); in rcar_dmac_desc_alloc()
513 list_add_tail(&page->node, &chan->desc.pages); in rcar_dmac_desc_alloc()
514 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_desc_alloc()
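Lines 112 and 115 size the per-page descriptor and chunk arrays so one rcar_dmac_desc_page fills a page, and rcar_dmac_desc_alloc() then strings the fresh descriptors onto a temporary list before splicing them into the channel's free list under the lock. The standalone sketch below reproduces only the sizing arithmetic; the structure sizes are assumptions.

/* Sketch of the "descriptors per page" sizing used by the
 * RCAR_DMAC_DESCS_PER_PAGE / RCAR_DMAC_XFER_CHUNKS_PER_PAGE macros above.
 * Struct layouts here are stand-ins; only the arithmetic pattern matches.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096u

struct fake_desc  { char payload[112]; };	/* assumed descriptor size */
struct fake_chunk { char payload[48]; };	/* assumed chunk size */

struct desc_page  { void *node[2]; struct fake_desc  descs[1]; };
struct chunk_page { void *node[2]; struct fake_chunk chunks[1]; };

#define DESCS_PER_PAGE \
	((PAGE_SIZE - offsetof(struct desc_page, descs)) / sizeof(struct fake_desc))
#define CHUNKS_PER_PAGE \
	((PAGE_SIZE - offsetof(struct chunk_page, chunks)) / sizeof(struct fake_chunk))

int main(void)
{
	printf("%zu descriptors or %zu chunks per page\n",
	       (size_t)DESCS_PER_PAGE, (size_t)CHUNKS_PER_PAGE);
	return 0;
}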
520 * rcar_dmac_desc_put - Release a DMA transfer descriptor
525 * free descriptor lists. The descriptor's chunks list will be reinitialized to
536 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_desc_put()
537 list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free); in rcar_dmac_desc_put()
538 list_add(&desc->node, &chan->desc.free); in rcar_dmac_desc_put()
539 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_desc_put()
554 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_desc_recycle_acked()
555 list_splice_init(&chan->desc.wait, &list); in rcar_dmac_desc_recycle_acked()
556 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_desc_recycle_acked()
559 if (async_tx_test_ack(&desc->async_tx)) { in rcar_dmac_desc_recycle_acked()
560 list_del(&desc->node); in rcar_dmac_desc_recycle_acked()
569 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_desc_recycle_acked()
570 list_splice(&list, &chan->desc.wait); in rcar_dmac_desc_recycle_acked()
571 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_desc_recycle_acked()
575 * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
578 * Locking: This function must be called in a non-atomic context.
592 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_desc_get()
594 while (list_empty(&chan->desc.free)) { in rcar_dmac_desc_get()
596 * No free descriptors, allocate a page worth of them and try in rcar_dmac_desc_get()
601 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_desc_get()
605 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_desc_get()
608 desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node); in rcar_dmac_desc_get()
609 list_del(&desc->node); in rcar_dmac_desc_get()
611 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_desc_get()
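rcar_dmac_desc_get() shows a recurring pattern: while the free list is empty, drop the spinlock, allocate a fresh page of descriptors in a context where blocking allocation bookkeeping is allowed, then retake the lock and re-check. The sketch below reproduces that loop in userspace with a pthread mutex and a trivial singly linked free list; all names are made up for the example.

/* Userspace sketch of the "drop the lock to refill the free list" loop used
 * by rcar_dmac_desc_get() and rcar_dmac_xfer_chunk_get() above.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *free_list;		/* channel's free descriptor list */

static int refill_free_list(void)	/* analogue of rcar_dmac_desc_alloc() */
{
	for (int i = 0; i < 16; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return -1;
		pthread_mutex_lock(&lock);
		n->next = free_list;
		free_list = n;
		pthread_mutex_unlock(&lock);
	}
	return 0;
}

static struct node *desc_get(void)
{
	struct node *n;

	pthread_mutex_lock(&lock);
	while (!free_list) {
		/* No free descriptors: drop the lock, refill, retake, retry. */
		pthread_mutex_unlock(&lock);
		if (refill_free_list() < 0)
			return NULL;
		pthread_mutex_lock(&lock);
	}
	n = free_list;
	free_list = n->next;
	pthread_mutex_unlock(&lock);
	return n;
}

int main(void)
{
	printf("got descriptor %p\n", (void *)desc_get());
	return 0;
}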
617 * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
630 return -ENOMEM; in rcar_dmac_xfer_chunk_alloc()
633 struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i]; in rcar_dmac_xfer_chunk_alloc()
635 list_add_tail(&chunk->node, &list); in rcar_dmac_xfer_chunk_alloc()
638 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_xfer_chunk_alloc()
639 list_splice_tail(&list, &chan->desc.chunks_free); in rcar_dmac_xfer_chunk_alloc()
640 list_add_tail(&page->node, &chan->desc.pages); in rcar_dmac_xfer_chunk_alloc()
641 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_xfer_chunk_alloc()
647 * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
650 * Locking: This function must be called in a non-atomic context.
662 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_xfer_chunk_get()
664 while (list_empty(&chan->desc.chunks_free)) { in rcar_dmac_xfer_chunk_get()
666 * No free chunks, allocate a page worth of them and try in rcar_dmac_xfer_chunk_get()
671 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_xfer_chunk_get()
675 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_xfer_chunk_get()
678 chunk = list_first_entry(&chan->desc.chunks_free, in rcar_dmac_xfer_chunk_get()
680 list_del(&chunk->node); in rcar_dmac_xfer_chunk_get()
682 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_xfer_chunk_get()
698 if (desc->hwdescs.size == size) in rcar_dmac_realloc_hwdesc()
701 if (desc->hwdescs.mem) { in rcar_dmac_realloc_hwdesc()
702 dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size, in rcar_dmac_realloc_hwdesc()
703 desc->hwdescs.mem, desc->hwdescs.dma); in rcar_dmac_realloc_hwdesc()
704 desc->hwdescs.mem = NULL; in rcar_dmac_realloc_hwdesc()
705 desc->hwdescs.size = 0; in rcar_dmac_realloc_hwdesc()
711 desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size, in rcar_dmac_realloc_hwdesc()
712 &desc->hwdescs.dma, GFP_NOWAIT); in rcar_dmac_realloc_hwdesc()
713 if (!desc->hwdescs.mem) in rcar_dmac_realloc_hwdesc()
716 desc->hwdescs.size = size; in rcar_dmac_realloc_hwdesc()
725 rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc)); in rcar_dmac_fill_hwdesc()
727 hwdesc = desc->hwdescs.mem; in rcar_dmac_fill_hwdesc()
729 return -ENOMEM; in rcar_dmac_fill_hwdesc()
731 list_for_each_entry(chunk, &desc->chunks, node) { in rcar_dmac_fill_hwdesc()
732 hwdesc->sar = chunk->src_addr; in rcar_dmac_fill_hwdesc()
733 hwdesc->dar = chunk->dst_addr; in rcar_dmac_fill_hwdesc()
734 hwdesc->tcr = chunk->size >> desc->xfer_shift; in rcar_dmac_fill_hwdesc()
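rcar_dmac_fill_hwdesc() copies each software chunk into a flat array of hardware descriptors held in coherent memory, one entry per chunk, with the transfer count expressed in transfer-size units (size >> xfer_shift). The sketch below assumes a 16-byte sar/dar/tcr/reserved entry layout, which is not shown in this listing.

/* Standalone sketch of filling a flat hardware-descriptor array from a
 * chunk list, as rcar_dmac_fill_hwdesc() does. The entry layout is an
 * assumption for illustration.
 */
#include <stdint.h>
#include <stdio.h>

struct hw_desc {
	uint32_t sar;		/* source address (low 32 bits) */
	uint32_t dar;		/* destination address (low 32 bits) */
	uint32_t tcr;		/* transfer count, in transfer-size units */
	uint32_t reserved;
};

struct chunk {
	uint64_t src_addr;
	uint64_t dst_addr;
	uint32_t size;		/* bytes */
};

int main(void)
{
	struct chunk chunks[] = {
		{ 0xe6e60000, 0x40000000, 4096 },
		{ 0xe6e60000, 0x40001000, 4096 },
	};
	unsigned int xfer_shift = 2;		/* 4-byte transfers */
	struct hw_desc hwdescs[2];

	for (unsigned int i = 0; i < 2; i++) {
		hwdescs[i].sar = (uint32_t)chunks[i].src_addr;
		hwdescs[i].dar = (uint32_t)chunks[i].dst_addr;
		hwdescs[i].tcr = chunks[i].size >> xfer_shift;
		hwdescs[i].reserved = 0;
	}

	printf("hwdesc[1]: sar=0x%x dar=0x%x tcr=%u\n",
	       (unsigned)hwdescs[1].sar, (unsigned)hwdescs[1].dar,
	       (unsigned)hwdescs[1].tcr);
	return 0;
}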
741 /* -----------------------------------------------------------------------------
760 dev_err(chan->chan.device->dev, "CHCR DE check error\n"); in rcar_dmac_chcr_de_barrier()
791 spin_lock_irqsave(&chan->lock, flags); in rcar_dmac_chan_reinit()
793 /* Move all non-free descriptors to the local lists. */ in rcar_dmac_chan_reinit()
794 list_splice_init(&chan->desc.pending, &descs); in rcar_dmac_chan_reinit()
795 list_splice_init(&chan->desc.active, &descs); in rcar_dmac_chan_reinit()
796 list_splice_init(&chan->desc.done, &descs); in rcar_dmac_chan_reinit()
797 list_splice_init(&chan->desc.wait, &descs); in rcar_dmac_chan_reinit()
799 chan->desc.running = NULL; in rcar_dmac_chan_reinit()
801 spin_unlock_irqrestore(&chan->lock, flags); in rcar_dmac_chan_reinit()
804 list_del(&desc->node); in rcar_dmac_chan_reinit()
814 for (i = 0; i < dmac->n_channels; ++i) { in rcar_dmac_stop_all_chan()
815 struct rcar_dmac_chan *chan = &dmac->channels[i]; in rcar_dmac_stop_all_chan()
818 spin_lock_irq(&chan->lock); in rcar_dmac_stop_all_chan()
820 spin_unlock_irq(&chan->lock); in rcar_dmac_stop_all_chan()
829 spin_lock_irqsave(&rchan->lock, flags); in rcar_dmac_chan_pause()
831 spin_unlock_irqrestore(&rchan->lock, flags); in rcar_dmac_chan_pause()
836 /* -----------------------------------------------------------------------------
853 switch (desc->direction) { in rcar_dmac_chan_configure_desc()
857 xfer_size = chan->src.xfer_size; in rcar_dmac_chan_configure_desc()
863 xfer_size = chan->dst.xfer_size; in rcar_dmac_chan_configure_desc()
874 desc->xfer_shift = ilog2(xfer_size); in rcar_dmac_chan_configure_desc()
875 desc->chcr = chcr | chcr_ts[desc->xfer_shift]; in rcar_dmac_chan_configure_desc()
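rcar_dmac_chan_configure_desc() converts the configured transfer width into a shift, desc->xfer_shift = ilog2(xfer_size), and uses that shift to index the chcr_ts[] table of CHCR transfer-size field values. The sketch below reproduces only the shift derivation; the table contents are placeholders, not the real register encodings.

/* Standalone sketch of deriving xfer_shift from the transfer width and
 * indexing a TS-field table with it, as in rcar_dmac_chan_configure_desc().
 * ts_table values and the CHCR base are placeholders.
 */
#include <stdio.h>

static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	static const unsigned int ts_table[] = { 0x0, 0x1, 0x2, 0x3, 0x4, 0x5 };
	unsigned int xfer_size = 4;			/* bytes per beat */
	unsigned int xfer_shift = ilog2_u32(xfer_size);	/* -> 2 */
	unsigned int chcr = 0x1000 | ts_table[xfer_shift];	/* placeholder */

	printf("xfer_shift=%u chcr=0x%x\n", xfer_shift, chcr);
	return 0;
}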
879 * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
882 * converted to scatter-gather to guarantee consistent locking and a correct
911 desc->async_tx.flags = dma_flags; in rcar_dmac_chan_prep_sg()
912 desc->async_tx.cookie = -EBUSY; in rcar_dmac_chan_prep_sg()
914 desc->cyclic = cyclic; in rcar_dmac_chan_prep_sg()
915 desc->direction = dir; in rcar_dmac_chan_prep_sg()
919 max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift; in rcar_dmac_chan_prep_sg()
949 if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) { in rcar_dmac_chan_prep_sg()
950 size = ALIGN(dev_addr, 1ULL << 32) - dev_addr; in rcar_dmac_chan_prep_sg()
953 if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) { in rcar_dmac_chan_prep_sg()
954 size = ALIGN(mem_addr, 1ULL << 32) - mem_addr; in rcar_dmac_chan_prep_sg()
966 chunk->src_addr = dev_addr; in rcar_dmac_chan_prep_sg()
967 chunk->dst_addr = mem_addr; in rcar_dmac_chan_prep_sg()
969 chunk->src_addr = mem_addr; in rcar_dmac_chan_prep_sg()
970 chunk->dst_addr = dev_addr; in rcar_dmac_chan_prep_sg()
973 chunk->size = size; in rcar_dmac_chan_prep_sg()
975 dev_dbg(chan->chan.device->dev, in rcar_dmac_chan_prep_sg()
976 "chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n", in rcar_dmac_chan_prep_sg()
977 chan->index, chunk, desc, i, sg, size, len, in rcar_dmac_chan_prep_sg()
978 &chunk->src_addr, &chunk->dst_addr); in rcar_dmac_chan_prep_sg()
984 len -= size; in rcar_dmac_chan_prep_sg()
986 list_add_tail(&chunk->node, &desc->chunks); in rcar_dmac_chan_prep_sg()
991 desc->nchunks = nchunks; in rcar_dmac_chan_prep_sg()
992 desc->size = full_size; in rcar_dmac_chan_prep_sg()
1002 desc->hwdescs.use = !cross_boundary && nchunks > 1; in rcar_dmac_chan_prep_sg()
1003 if (desc->hwdescs.use) { in rcar_dmac_chan_prep_sg()
1005 desc->hwdescs.use = false; in rcar_dmac_chan_prep_sg()
1008 return &desc->async_tx; in rcar_dmac_chan_prep_sg()
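The chunk-building loop (lines 949–986) never lets a single hardware chunk cross a 4 GiB boundary on either the device or the memory side, because only the low 32 bits of each address are reprogrammed per chunk; when a span would cross, the chunk is clamped to end exactly at the boundary and the cross_boundary flag later disables hardware-descriptor mode. The standalone sketch below reproduces just that clamping arithmetic.

/* Standalone sketch of the 4 GiB boundary clamp applied per chunk in
 * rcar_dmac_chan_prep_sg(): if [addr, addr + size) would cross a 32-bit
 * boundary, shrink size so the chunk ends at the boundary.
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

static uint32_t clamp_to_4g(uint64_t addr, uint32_t size)
{
	if ((addr >> 32) != ((addr + size - 1) >> 32))
		size = (uint32_t)(ALIGN_UP(addr, 1ULL << 32) - addr);
	return size;
}

int main(void)
{
	uint64_t mem_addr = 0xfffff000;	/* 4 KiB below the 4 GiB boundary */
	uint32_t size = clamp_to_4g(mem_addr, 0x2000);

	printf("clamped chunk size: 0x%x bytes\n", (unsigned)size);	/* 0x1000 */
	return 0;
}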
1011 /* -----------------------------------------------------------------------------
1020 INIT_LIST_HEAD(&rchan->desc.chunks_free); in rcar_dmac_alloc_chan_resources()
1021 INIT_LIST_HEAD(&rchan->desc.pages); in rcar_dmac_alloc_chan_resources()
1026 return -ENOMEM; in rcar_dmac_alloc_chan_resources()
1030 return -ENOMEM; in rcar_dmac_alloc_chan_resources()
1032 return pm_runtime_get_sync(chan->device->dev); in rcar_dmac_alloc_chan_resources()
1038 struct rcar_dmac *dmac = to_rcar_dmac(chan->device); in rcar_dmac_free_chan_resources()
1039 struct rcar_dmac_chan_map *map = &rchan->map; in rcar_dmac_free_chan_resources()
1045 spin_lock_irq(&rchan->lock); in rcar_dmac_free_chan_resources()
1047 spin_unlock_irq(&rchan->lock); in rcar_dmac_free_chan_resources()
1051 * running. Wait for it to finish before freeing resources. in rcar_dmac_free_chan_resources()
1053 synchronize_irq(rchan->irq); in rcar_dmac_free_chan_resources()
1055 if (rchan->mid_rid >= 0) { in rcar_dmac_free_chan_resources()
1057 clear_bit(rchan->mid_rid, dmac->modules); in rcar_dmac_free_chan_resources()
1058 rchan->mid_rid = -EINVAL; in rcar_dmac_free_chan_resources()
1061 list_splice_init(&rchan->desc.free, &list); in rcar_dmac_free_chan_resources()
1062 list_splice_init(&rchan->desc.pending, &list); in rcar_dmac_free_chan_resources()
1063 list_splice_init(&rchan->desc.active, &list); in rcar_dmac_free_chan_resources()
1064 list_splice_init(&rchan->desc.done, &list); in rcar_dmac_free_chan_resources()
1065 list_splice_init(&rchan->desc.wait, &list); in rcar_dmac_free_chan_resources()
1067 rchan->desc.running = NULL; in rcar_dmac_free_chan_resources()
1072 list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) { in rcar_dmac_free_chan_resources()
1073 list_del(&page->node); in rcar_dmac_free_chan_resources()
1078 if (map->slave.xfer_size) { in rcar_dmac_free_chan_resources()
1079 dma_unmap_resource(chan->device->dev, map->addr, in rcar_dmac_free_chan_resources()
1080 map->slave.xfer_size, map->dir, 0); in rcar_dmac_free_chan_resources()
1081 map->slave.xfer_size = 0; in rcar_dmac_free_chan_resources()
1084 pm_runtime_put(chan->device->dev); in rcar_dmac_free_chan_resources()
1111 struct rcar_dmac_chan_map *map = &rchan->map; in rcar_dmac_map_slave_addr()
1117 dev_addr = rchan->src.slave_addr; in rcar_dmac_map_slave_addr()
1118 dev_size = rchan->src.xfer_size; in rcar_dmac_map_slave_addr()
1121 dev_addr = rchan->dst.slave_addr; in rcar_dmac_map_slave_addr()
1122 dev_size = rchan->dst.xfer_size; in rcar_dmac_map_slave_addr()
1127 if (dev_addr == map->slave.slave_addr && in rcar_dmac_map_slave_addr()
1128 dev_size == map->slave.xfer_size && in rcar_dmac_map_slave_addr()
1129 dev_dir == map->dir) in rcar_dmac_map_slave_addr()
1133 if (map->slave.xfer_size) in rcar_dmac_map_slave_addr()
1134 dma_unmap_resource(chan->device->dev, map->addr, in rcar_dmac_map_slave_addr()
1135 map->slave.xfer_size, map->dir, 0); in rcar_dmac_map_slave_addr()
1136 map->slave.xfer_size = 0; in rcar_dmac_map_slave_addr()
1139 map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size, in rcar_dmac_map_slave_addr()
1142 if (dma_mapping_error(chan->device->dev, map->addr)) { in rcar_dmac_map_slave_addr()
1143 dev_err(chan->device->dev, in rcar_dmac_map_slave_addr()
1144 "chan%u: failed to map %zx@%pap", rchan->index, in rcar_dmac_map_slave_addr()
1146 return -EIO; in rcar_dmac_map_slave_addr()
1149 dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n", in rcar_dmac_map_slave_addr()
1150 rchan->index, dev_size, &dev_addr, &map->addr, in rcar_dmac_map_slave_addr()
1153 map->slave.slave_addr = dev_addr; in rcar_dmac_map_slave_addr()
1154 map->slave.xfer_size = dev_size; in rcar_dmac_map_slave_addr()
1155 map->dir = dev_dir; in rcar_dmac_map_slave_addr()
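rcar_dmac_map_slave_addr() keeps a single cached dma_map_resource() mapping per channel and reuses it as long as the slave address, size and direction are unchanged; otherwise it unmaps the stale mapping and creates a new one. The userspace sketch below shows only that "compare the cache key, remap on mismatch" decision, with made-up map/unmap stubs.

/* Userspace sketch of the per-channel slave mapping cache kept by
 * rcar_dmac_map_slave_addr(). map_resource()/unmap_resource() are stubs.
 */
#include <stdint.h>
#include <stdio.h>

enum xfer_dir { DIR_TO_DEV, DIR_FROM_DEV };

struct slave_map {
	uint64_t slave_addr;
	unsigned int xfer_size;
	enum xfer_dir dir;
	uint64_t dma_addr;	/* 0 means "no mapping cached" */
};

static uint64_t map_resource(uint64_t addr) { return addr | 0x100000000ull; }
static void unmap_resource(uint64_t dma_addr) { (void)dma_addr; }

static uint64_t get_mapping(struct slave_map *map, uint64_t addr,
			    unsigned int size, enum xfer_dir dir)
{
	if (map->dma_addr && addr == map->slave_addr &&
	    size == map->xfer_size && dir == map->dir)
		return map->dma_addr;		/* reuse the cached mapping */

	if (map->dma_addr)
		unmap_resource(map->dma_addr);	/* stale mapping: drop it */

	map->slave_addr = addr;
	map->xfer_size = size;
	map->dir = dir;
	map->dma_addr = map_resource(addr);
	return map->dma_addr;
}

int main(void)
{
	struct slave_map map = { 0, 0, DIR_TO_DEV, 0 };

	printf("mapped to 0x%llx\n",
	       (unsigned long long)get_mapping(&map, 0xe6e60000, 4, DIR_FROM_DEV));
	printf("reused as 0x%llx\n",
	       (unsigned long long)get_mapping(&map, 0xe6e60000, 4, DIR_FROM_DEV));
	return 0;
}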
1168 if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) { in rcar_dmac_prep_slave_sg()
1169 dev_warn(chan->device->dev, in rcar_dmac_prep_slave_sg()
1171 __func__, sg_len, rchan->mid_rid); in rcar_dmac_prep_slave_sg()
1178 return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr, in rcar_dmac_prep_slave_sg()
1196 if (rchan->mid_rid < 0 || buf_len < period_len) { in rcar_dmac_prep_dma_cyclic()
1197 dev_warn(chan->device->dev, in rcar_dmac_prep_dma_cyclic()
1199 __func__, buf_len, period_len, rchan->mid_rid); in rcar_dmac_prep_dma_cyclic()
1208 dev_err(chan->device->dev, in rcar_dmac_prep_dma_cyclic()
1210 rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN); in rcar_dmac_prep_dma_cyclic()
1233 desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr, in rcar_dmac_prep_dma_cyclic()
1249 rchan->src.slave_addr = cfg->src_addr; in rcar_dmac_device_config()
1250 rchan->dst.slave_addr = cfg->dst_addr; in rcar_dmac_device_config()
1251 rchan->src.xfer_size = cfg->src_addr_width; in rcar_dmac_device_config()
1252 rchan->dst.xfer_size = cfg->dst_addr_width; in rcar_dmac_device_config()
1262 spin_lock_irqsave(&rchan->lock, flags); in rcar_dmac_chan_terminate_all()
1264 spin_unlock_irqrestore(&rchan->lock, flags); in rcar_dmac_chan_terminate_all()
1268 * be running. in rcar_dmac_chan_terminate_all()
1279 struct rcar_dmac_desc *desc = chan->desc.running; in rcar_dmac_chan_get_residue()
1280 struct rcar_dmac_xfer_chunk *running = NULL; in rcar_dmac_chan_get_residue() local
1298 status = dma_cookie_status(&chan->chan, cookie, NULL); in rcar_dmac_chan_get_residue()
1303 * If the cookie doesn't correspond to the currently running transfer in rcar_dmac_chan_get_residue()
1307 * rcar_dmac_isr_channel_thread() runs. In this case, the "desc.running" in rcar_dmac_chan_get_residue()
1312 if (cookie != desc->async_tx.cookie) { in rcar_dmac_chan_get_residue()
1313 list_for_each_entry(desc, &chan->desc.done, node) { in rcar_dmac_chan_get_residue()
1314 if (cookie == desc->async_tx.cookie) in rcar_dmac_chan_get_residue()
1317 list_for_each_entry(desc, &chan->desc.pending, node) { in rcar_dmac_chan_get_residue()
1318 if (cookie == desc->async_tx.cookie) in rcar_dmac_chan_get_residue()
1319 return desc->size; in rcar_dmac_chan_get_residue()
1321 list_for_each_entry(desc, &chan->desc.active, node) { in rcar_dmac_chan_get_residue()
1322 if (cookie == desc->async_tx.cookie) in rcar_dmac_chan_get_residue()
1323 return desc->size; in rcar_dmac_chan_get_residue()
1354 * In descriptor mode the descriptor running pointer is not maintained in rcar_dmac_chan_get_residue()
1355 * by the interrupt handler, find the running descriptor from the in rcar_dmac_chan_get_residue()
1356 * descriptor pointer field in the CHCRB register. In non-descriptor in rcar_dmac_chan_get_residue()
1357 * mode just use the running descriptor pointer. in rcar_dmac_chan_get_residue()
1359 if (desc->hwdescs.use) { in rcar_dmac_chan_get_residue()
1362 dptr = desc->nchunks; in rcar_dmac_chan_get_residue()
1363 dptr--; in rcar_dmac_chan_get_residue()
1364 WARN_ON(dptr >= desc->nchunks); in rcar_dmac_chan_get_residue()
1366 running = desc->running; in rcar_dmac_chan_get_residue()
1370 list_for_each_entry_reverse(chunk, &desc->chunks, node) { in rcar_dmac_chan_get_residue()
1371 if (chunk == running || ++dptr == desc->nchunks) in rcar_dmac_chan_get_residue()
1374 residue += chunk->size; in rcar_dmac_chan_get_residue()
1378 residue += tcrb << desc->xfer_shift; in rcar_dmac_chan_get_residue()
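The residue for the in-flight descriptor is built in two parts: the full size of every chunk that has not started yet (walking the chunk list in reverse until the running chunk, or until the DPTR index read from CHCRB is reached in hardware-descriptor mode), plus the hardware's remaining transfer count TCRB scaled back to bytes with xfer_shift. The standalone sketch below reproduces that arithmetic for a fixed chunk array.

/* Standalone sketch of the residue arithmetic in rcar_dmac_chan_get_residue():
 * bytes left = sizes of all chunks after the running one
 *            + (remaining transfer count TCRB) << xfer_shift.
 */
#include <stdio.h>

int main(void)
{
	unsigned int chunk_size[] = { 4096, 4096, 2048 };	/* bytes, in order */
	unsigned int nchunks = 3;
	unsigned int running = 1;	/* index of the chunk on the hardware */
	unsigned int xfer_shift = 2;	/* 4-byte transfers */
	unsigned int tcrb = 100;	/* remaining beats read from TCRB */
	unsigned int residue = 0;

	/* Chunks that have not been started yet still count in full. */
	for (unsigned int i = running + 1; i < nchunks; i++)
		residue += chunk_size[i];

	/* The running chunk contributes whatever the hardware has not moved. */
	residue += tcrb << xfer_shift;

	printf("residue: %u bytes\n", residue);	/* 2048 + 400 = 2448 */
	return 0;
}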
1397 spin_lock_irqsave(&rchan->lock, flags); in rcar_dmac_tx_status()
1399 cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false; in rcar_dmac_tx_status()
1400 spin_unlock_irqrestore(&rchan->lock, flags); in rcar_dmac_tx_status()
1416 spin_lock_irqsave(&rchan->lock, flags); in rcar_dmac_issue_pending()
1418 if (list_empty(&rchan->desc.pending)) in rcar_dmac_issue_pending()
1422 list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active); in rcar_dmac_issue_pending()
1425 * If no transfer is running pick the first descriptor from the active in rcar_dmac_issue_pending()
1428 if (!rchan->desc.running) { in rcar_dmac_issue_pending()
1431 desc = list_first_entry(&rchan->desc.active, in rcar_dmac_issue_pending()
1433 rchan->desc.running = desc; in rcar_dmac_issue_pending()
1439 spin_unlock_irqrestore(&rchan->lock, flags); in rcar_dmac_issue_pending()
1446 synchronize_irq(rchan->irq); in rcar_dmac_device_synchronize()
1449 /* -----------------------------------------------------------------------------
1455 struct rcar_dmac_desc *desc = chan->desc.running; in rcar_dmac_isr_desc_stage_end()
1458 if (WARN_ON(!desc || !desc->cyclic)) { in rcar_dmac_isr_desc_stage_end()
1460 * This should never happen, there should always be a running in rcar_dmac_isr_desc_stage_end()
1477 struct rcar_dmac_desc *desc = chan->desc.running; in rcar_dmac_isr_transfer_end()
1482 * This should never happen, there should always be a running in rcar_dmac_isr_transfer_end()
1491 * descriptor mode. Only update the running chunk pointer in in rcar_dmac_isr_transfer_end()
1492 * non-descriptor mode. in rcar_dmac_isr_transfer_end()
1494 if (!desc->hwdescs.use) { in rcar_dmac_isr_transfer_end()
1500 if (!list_is_last(&desc->running->node, &desc->chunks)) { in rcar_dmac_isr_transfer_end()
1501 desc->running = list_next_entry(desc->running, node); in rcar_dmac_isr_transfer_end()
1502 if (!desc->cyclic) in rcar_dmac_isr_transfer_end()
1511 if (desc->cyclic) { in rcar_dmac_isr_transfer_end()
1512 desc->running = in rcar_dmac_isr_transfer_end()
1513 list_first_entry(&desc->chunks, in rcar_dmac_isr_transfer_end()
1521 list_move_tail(&desc->node, &chan->desc.done); in rcar_dmac_isr_transfer_end()
1524 if (!list_empty(&chan->desc.active)) in rcar_dmac_isr_transfer_end()
1525 chan->desc.running = list_first_entry(&chan->desc.active, in rcar_dmac_isr_transfer_end()
1529 chan->desc.running = NULL; in rcar_dmac_isr_transfer_end()
1532 if (chan->desc.running) in rcar_dmac_isr_transfer_end()
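In non-descriptor mode, rcar_dmac_isr_transfer_end() advances desc->running to the next chunk, wraps back to the first chunk when the descriptor is cyclic, and otherwise moves the finished descriptor to the done list and promotes the next active descriptor. The sketch below reduces that end-of-chunk decision to index arithmetic; list handling and locking are omitted.

/* Standalone sketch of the end-of-chunk decision in
 * rcar_dmac_isr_transfer_end(): advance, wrap if cyclic, or retire.
 */
#include <stdio.h>
#include <stdbool.h>

/* Returns true when the whole descriptor is complete (non-cyclic only). */
static bool chunk_done(unsigned int *running, unsigned int nchunks, bool cyclic)
{
	if (*running + 1 < nchunks) {
		(*running)++;		/* more chunks to go: advance */
		return false;
	}
	if (cyclic) {
		*running = 0;		/* cyclic transfer: wrap to the first chunk */
		return false;
	}
	return true;			/* last chunk of a one-shot transfer */
}

int main(void)
{
	unsigned int running = 0;
	bool done;

	done = chunk_done(&running, 2, false);
	printf("after 1st chunk: running=%u done=%d\n", running, done);
	done = chunk_done(&running, 2, false);
	printf("after 2nd chunk: running=%u done=%d\n", running, done);
	return 0;
}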
1546 spin_lock(&chan->lock); in rcar_dmac_isr_channel()
1550 struct rcar_dmac *dmac = to_rcar_dmac(chan->chan.device); in rcar_dmac_isr_channel()
1557 rcar_dmac_write(dmac, RCAR_DMACHCLR, 1 << chan->index); in rcar_dmac_isr_channel()
1576 spin_unlock(&chan->lock); in rcar_dmac_isr_channel()
1579 dev_err(chan->chan.device->dev, "Channel Address Error\n"); in rcar_dmac_isr_channel()
1594 spin_lock_irq(&chan->lock); in rcar_dmac_isr_channel_thread()
1597 if (chan->desc.running && chan->desc.running->cyclic) { in rcar_dmac_isr_channel_thread()
1598 desc = chan->desc.running; in rcar_dmac_isr_channel_thread()
1599 dmaengine_desc_get_callback(&desc->async_tx, &cb); in rcar_dmac_isr_channel_thread()
1602 spin_unlock_irq(&chan->lock); in rcar_dmac_isr_channel_thread()
1604 spin_lock_irq(&chan->lock); in rcar_dmac_isr_channel_thread()
1612 while (!list_empty(&chan->desc.done)) { in rcar_dmac_isr_channel_thread()
1613 desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc, in rcar_dmac_isr_channel_thread()
1615 dma_cookie_complete(&desc->async_tx); in rcar_dmac_isr_channel_thread()
1616 list_del(&desc->node); in rcar_dmac_isr_channel_thread()
1618 dmaengine_desc_get_callback(&desc->async_tx, &cb); in rcar_dmac_isr_channel_thread()
1620 spin_unlock_irq(&chan->lock); in rcar_dmac_isr_channel_thread()
1627 spin_lock_irq(&chan->lock); in rcar_dmac_isr_channel_thread()
1630 list_add_tail(&desc->node, &chan->desc.wait); in rcar_dmac_isr_channel_thread()
1633 spin_unlock_irq(&chan->lock); in rcar_dmac_isr_channel_thread()
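rcar_dmac_isr_channel_thread() completes cookies and invokes client callbacks with the channel lock released, then retakes it to move each descriptor to the wait list; descriptors are only recycled later, once acked, by rcar_dmac_desc_recycle_acked(). The userspace sketch below shows the "pop under the lock, call back without it" shape of that loop.

/* Userspace sketch of the completion loop in rcar_dmac_isr_channel_thread():
 * take one finished item under the lock, run its callback with the lock
 * dropped, retake the lock for the next iteration.
 */
#include <pthread.h>
#include <stdio.h>

struct done_item {
	struct done_item *next;
	void (*callback)(void *arg);
	void *arg;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct done_item *done_list;

static void drain_done_list(void)
{
	pthread_mutex_lock(&lock);
	while (done_list) {
		struct done_item *item = done_list;

		done_list = item->next;
		pthread_mutex_unlock(&lock);	/* never call back under the lock */
		if (item->callback)
			item->callback(item->arg);
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

static void say_done(void *arg) { printf("transfer %s complete\n", (char *)arg); }

int main(void)
{
	struct done_item item = { NULL, say_done, "A" };

	done_list = &item;
	drain_done_list();
	return 0;
}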
1641 /* -----------------------------------------------------------------------------
1647 struct rcar_dmac *dmac = to_rcar_dmac(chan->device); in rcar_dmac_chan_filter()
1657 if (chan->device->device_config != rcar_dmac_device_config || in rcar_dmac_chan_filter()
1658 dma_spec->np != chan->device->dev->of_node) in rcar_dmac_chan_filter()
1661 return !test_and_set_bit(dma_spec->args[0], dmac->modules); in rcar_dmac_chan_filter()
1671 if (dma_spec->args_count != 1) in rcar_dmac_of_xlate()
1683 rchan->mid_rid = dma_spec->args[0]; in rcar_dmac_of_xlate()
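The of_xlate/filter pair treats the first devicetree cell as the channel's MID/RID value and reserves it with test_and_set_bit() on a controller-wide bitmap, so each MID/RID is handed to at most one channel at a time. A standalone bitmap-reservation sketch follows; the bitmap width and the example MID/RID value are assumptions.

/* Standalone sketch of MID/RID reservation as done by rcar_dmac_chan_filter():
 * set the bit and fail if it was already set. The stand-in below is not
 * atomic, unlike the kernel's test_and_set_bit().
 */
#include <stdio.h>
#include <stdbool.h>
#include <limits.h>

#define MIDRID_BITS 256
static unsigned long modules[MIDRID_BITS / (sizeof(unsigned long) * CHAR_BIT)];

static bool test_and_set(unsigned int nr, unsigned long *map)
{
	unsigned long mask = 1UL << (nr % (sizeof(unsigned long) * CHAR_BIT));
	unsigned long *word = &map[nr / (sizeof(unsigned long) * CHAR_BIT)];
	bool was_set = *word & mask;

	*word |= mask;
	return was_set;
}

int main(void)
{
	unsigned int mid_rid = 0x91;	/* example value from a DT "dmas" cell */

	printf("first claim:  %s\n", test_and_set(mid_rid, modules) ? "busy" : "ok");
	printf("second claim: %s\n", test_and_set(mid_rid, modules) ? "busy" : "ok");
	return 0;
}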
1688 /* -----------------------------------------------------------------------------
1709 * - Wait for the current transfer to complete and stop the device,
1710 * - Resume transfers, if any.
1718 /* -----------------------------------------------------------------------------
1726 struct platform_device *pdev = to_platform_device(dmac->dev); in rcar_dmac_chan_probe()
1727 struct dma_chan *chan = &rchan->chan; in rcar_dmac_chan_probe()
1732 rchan->index = index; in rcar_dmac_chan_probe()
1733 rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index); in rcar_dmac_chan_probe()
1734 rchan->mid_rid = -EINVAL; in rcar_dmac_chan_probe()
1736 spin_lock_init(&rchan->lock); in rcar_dmac_chan_probe()
1738 INIT_LIST_HEAD(&rchan->desc.free); in rcar_dmac_chan_probe()
1739 INIT_LIST_HEAD(&rchan->desc.pending); in rcar_dmac_chan_probe()
1740 INIT_LIST_HEAD(&rchan->desc.active); in rcar_dmac_chan_probe()
1741 INIT_LIST_HEAD(&rchan->desc.done); in rcar_dmac_chan_probe()
1742 INIT_LIST_HEAD(&rchan->desc.wait); in rcar_dmac_chan_probe()
1746 rchan->irq = platform_get_irq_byname(pdev, pdev_irqname); in rcar_dmac_chan_probe()
1747 if (rchan->irq < 0) { in rcar_dmac_chan_probe()
1748 dev_err(dmac->dev, "no IRQ specified for channel %u\n", index); in rcar_dmac_chan_probe()
1749 return -ENODEV; in rcar_dmac_chan_probe()
1752 irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u", in rcar_dmac_chan_probe()
1753 dev_name(dmac->dev), index); in rcar_dmac_chan_probe()
1755 return -ENOMEM; in rcar_dmac_chan_probe()
1761 chan->device = &dmac->engine; in rcar_dmac_chan_probe()
1764 list_add_tail(&chan->device_node, &dmac->engine.channels); in rcar_dmac_chan_probe()
1766 ret = devm_request_threaded_irq(dmac->dev, rchan->irq, in rcar_dmac_chan_probe()
1771 dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", in rcar_dmac_chan_probe()
1772 rchan->irq, ret); in rcar_dmac_chan_probe()
1781 struct device_node *np = dev->of_node; in rcar_dmac_parse_of()
1784 ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels); in rcar_dmac_parse_of()
1786 dev_err(dev, "unable to read dma-channels property\n"); in rcar_dmac_parse_of()
1790 if (dmac->n_channels <= 0 || dmac->n_channels >= 100) { in rcar_dmac_parse_of()
1792 dmac->n_channels); in rcar_dmac_parse_of()
1793 return -EINVAL; in rcar_dmac_parse_of()
1812 dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL); in rcar_dmac_probe()
1814 return -ENOMEM; in rcar_dmac_probe()
1816 dmac->dev = &pdev->dev; in rcar_dmac_probe()
1818 dmac->dev->dma_parms = &dmac->parms; in rcar_dmac_probe()
1819 dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); in rcar_dmac_probe()
1820 dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); in rcar_dmac_probe()
1822 ret = rcar_dmac_parse_of(&pdev->dev, dmac); in rcar_dmac_probe()
1834 if (pdev->dev.iommu_group) { in rcar_dmac_probe()
1835 dmac->n_channels--; in rcar_dmac_probe()
1839 dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels, in rcar_dmac_probe()
1840 sizeof(*dmac->channels), GFP_KERNEL); in rcar_dmac_probe()
1841 if (!dmac->channels) in rcar_dmac_probe()
1842 return -ENOMEM; in rcar_dmac_probe()
1846 dmac->iomem = devm_ioremap_resource(&pdev->dev, mem); in rcar_dmac_probe()
1847 if (IS_ERR(dmac->iomem)) in rcar_dmac_probe()
1848 return PTR_ERR(dmac->iomem); in rcar_dmac_probe()
1851 pm_runtime_enable(&pdev->dev); in rcar_dmac_probe()
1852 ret = pm_runtime_get_sync(&pdev->dev); in rcar_dmac_probe()
1854 dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret); in rcar_dmac_probe()
1859 pm_runtime_put(&pdev->dev); in rcar_dmac_probe()
1862 dev_err(&pdev->dev, "failed to reset device\n"); in rcar_dmac_probe()
1867 engine = &dmac->engine; in rcar_dmac_probe()
1869 dma_cap_set(DMA_MEMCPY, engine->cap_mask); in rcar_dmac_probe()
1870 dma_cap_set(DMA_SLAVE, engine->cap_mask); in rcar_dmac_probe()
1872 engine->dev = &pdev->dev; in rcar_dmac_probe()
1873 engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE); in rcar_dmac_probe()
1875 engine->src_addr_widths = widths; in rcar_dmac_probe()
1876 engine->dst_addr_widths = widths; in rcar_dmac_probe()
1877 engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); in rcar_dmac_probe()
1878 engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; in rcar_dmac_probe()
1880 engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources; in rcar_dmac_probe()
1881 engine->device_free_chan_resources = rcar_dmac_free_chan_resources; in rcar_dmac_probe()
1882 engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy; in rcar_dmac_probe()
1883 engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg; in rcar_dmac_probe()
1884 engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic; in rcar_dmac_probe()
1885 engine->device_config = rcar_dmac_device_config; in rcar_dmac_probe()
1886 engine->device_pause = rcar_dmac_chan_pause; in rcar_dmac_probe()
1887 engine->device_terminate_all = rcar_dmac_chan_terminate_all; in rcar_dmac_probe()
1888 engine->device_tx_status = rcar_dmac_tx_status; in rcar_dmac_probe()
1889 engine->device_issue_pending = rcar_dmac_issue_pending; in rcar_dmac_probe()
1890 engine->device_synchronize = rcar_dmac_device_synchronize; in rcar_dmac_probe()
1892 INIT_LIST_HEAD(&engine->channels); in rcar_dmac_probe()
1894 for (i = 0; i < dmac->n_channels; ++i) { in rcar_dmac_probe()
1895 ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], in rcar_dmac_probe()
1902 ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate, in rcar_dmac_probe()
1910 * Default transfer size of 32 bytes requires 32-byte alignment. in rcar_dmac_probe()
1919 of_dma_controller_free(pdev->dev.of_node); in rcar_dmac_probe()
1920 pm_runtime_disable(&pdev->dev); in rcar_dmac_probe()
1928 of_dma_controller_free(pdev->dev.of_node); in rcar_dmac_remove()
1929 dma_async_device_unregister(&dmac->engine); in rcar_dmac_remove()
1931 pm_runtime_disable(&pdev->dev); in rcar_dmac_remove()
1944 { .compatible = "renesas,rcar-dmac", },
1952 .name = "rcar-dmac",
1962 MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");