/drivers/dma/
owl-dma.c:
  191  struct owl_dma_vchan *vchan;  (member)
  380  static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,  in owl_dma_cfg_lli() (argument)
  387  struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);  in owl_dma_cfg_lli()
  400  mode |= OWL_DMA_MODE_TS(vchan->drq)  in owl_dma_cfg_lli()
  413  mode |= OWL_DMA_MODE_TS(vchan->drq)  in owl_dma_cfg_lli()
  468  struct owl_dma_vchan *vchan)  in owl_dma_get_pchan() (argument)
  478  if (!pchan->vchan) {  in owl_dma_get_pchan()
  479  pchan->vchan = vchan;  in owl_dma_get_pchan()
  519  pchan->vchan = NULL;  in owl_dma_terminate_pchan()
  534  static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)  in owl_dma_start_next_txd() (argument)
  [all …]
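The owl_dma_get_pchan() hits above show the physical-channel claim pattern shared by several drivers in this list: scan the pchans and claim the first one whose ->vchan back-pointer is NULL; termination clears the pointer so the pchan can be reused. A minimal sketch, assuming <linux/spinlock.h> and drivers/dma/virt-dma.h; every foo_* name and the per-device lock are hypothetical:

    /* Sketch: claim a free physical channel for a virtual channel. */
    struct foo_vchan {
        struct virt_dma_chan vc;        /* real type, from virt-dma.h */
    };

    struct foo_pchan {
        struct foo_vchan *vchan;        /* owner; NULL while the pchan is free */
    };

    struct foo_dev {
        spinlock_t lock;                /* protects the pchan table */
        unsigned int nr_pchans;
        struct foo_pchan *pchans;
    };

    static struct foo_pchan *foo_get_pchan(struct foo_dev *od,
                                           struct foo_vchan *vchan)
    {
        struct foo_pchan *pchan = NULL;
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&od->lock, flags);
        for (i = 0; i < od->nr_pchans; i++) {
            if (!od->pchans[i].vchan) {     /* free: claim it */
                pchan = &od->pchans[i];
                pchan->vchan = vchan;
                break;
            }
        }
        spin_unlock_irqrestore(&od->lock, flags);

        return pchan;                       /* NULL if every pchan is busy */
    }

Release is the reverse, setting pchan->vchan back to NULL, which is what the owl_dma_terminate_pchan() hit does.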
sun4i-dma.c:
  129  struct sun4i_dma_vchan *vchan;  (member)
  212  struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);  in sun4i_dma_free_chan_resources() (local)
  214  vchan_free_chan_resources(&vchan->vc);  in sun4i_dma_free_chan_resources()
  218  struct sun4i_dma_vchan *vchan)  in find_and_use_pchan() (argument)
  228  if (vchan->is_dedicated) {  in find_and_use_pchan()
  239  pchan->vchan = vchan;  in find_and_use_pchan()
  256  pchan->vchan = NULL;  in release_pchan()
  320  struct sun4i_dma_vchan *vchan)  in __execute_vchan_pending() (argument)
  328  lockdep_assert_held(&vchan->vc.lock);  in __execute_vchan_pending()
  331  pchan = find_and_use_pchan(priv, vchan);  in __execute_vchan_pending()
  [all …]
sun6i-dma.c:
  170  struct sun6i_vchan *vchan;  (member)
  387  static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan,  in sun6i_dma_dump_lli() (argument)
  392  dev_dbg(chan2dev(&vchan->vc.chan),  in sun6i_dma_dump_lli()
  427  static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)  in sun6i_dma_start_desc() (argument)
  429  struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);  in sun6i_dma_start_desc()
  430  struct virt_dma_desc *desc = vchan_next_desc(&vchan->vc);  in sun6i_dma_start_desc()
  431  struct sun6i_pchan *pchan = vchan->phy;  in sun6i_dma_start_desc()
  448  sun6i_dma_dump_lli(vchan, pchan->desc->v_lli);  in sun6i_dma_start_desc()
  453  vchan->irq_type = vchan->cyclic ? DMA_IRQ_PKG : DMA_IRQ_QUEUE;  in sun6i_dma_start_desc()
  458  irq_val |= vchan->irq_type << (irq_offset * DMA_IRQ_CHAN_WIDTH);  in sun6i_dma_start_desc()
  [all …]
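sun6i_dma_start_desc() shows the "start next descriptor" step common to these drivers: with vc.lock held, pop the head of the issued list via vchan_next_desc(), detach it, and program the hardware from it. A hedged sketch; foo_* names are hypothetical:

    /* Start the next issued descriptor; caller holds vchan->vc.lock. */
    static int foo_start_desc(struct foo_vchan *vchan)
    {
        struct virt_dma_desc *vd;

        lockdep_assert_held(&vchan->vc.lock);

        vd = vchan_next_desc(&vchan->vc);   /* head of vc.desc_issued, or NULL */
        if (!vd)
            return -EAGAIN;                 /* nothing issued */

        list_del(&vd->node);                /* descriptor now owned by the HW */
        /* ... write hardware registers from the driver desc embedding vd ... */
        return 0;
    }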
st_fdma.c:
   24  return container_of(c, struct st_fdma_chan, vchan.chan);  in to_st_fdma_chan()
   79  vdesc = vchan_next_desc(&fchan->vchan);  in st_fdma_xfer_desc()
   85  cmd = FDMA_CMD_START(fchan->vchan.chan.chan_id);  in st_fdma_xfer_desc()
   94  dev_dbg(fchan->fdev->dev, "start chan:%d\n", fchan->vchan.chan.chan_id);  in st_fdma_xfer_desc()
  101  int ch_id = fchan->vchan.chan.chan_id;  in st_fdma_ch_sta_update()
  139  spin_lock(&fchan->vchan.lock);  in st_fdma_irq_handler()
  157  spin_unlock(&fchan->vchan.lock);  in st_fdma_irq_handler()
  282  fchan->vchan.chan.chan_id, fchan->cfg.type);  in st_fdma_alloc_chan_res()
  294  __func__, fchan->vchan.chan.chan_id);  in st_fdma_free_chan_res()
  299  spin_lock_irqsave(&fchan->vchan.lock, flags);  in st_fdma_free_chan_res()
  [all …]
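The st_fdma_irq_handler() hits bracket the usual completion sequence: take vchan.lock from interrupt context, complete the finished cookie, and kick the next descriptor. A minimal sketch; foo_* names are hypothetical, and foo_desc is assumed to embed a struct virt_dma_desc named vdesc:

    /* Sketch: completion half of a DMA IRQ handler. */
    static irqreturn_t foo_irq_handler(int irq, void *dev_id)
    {
        struct foo_chan *fchan = dev_id;

        spin_lock(&fchan->vchan.lock);
        if (fchan->desc) {
            /* Marks the cookie complete and queues the client callback. */
            vchan_cookie_complete(&fchan->desc->vdesc);
            fchan->desc = NULL;
            foo_start_next(fchan);      /* hypothetical: start next issued desc */
        }
        spin_unlock(&fchan->vchan.lock);

        return IRQ_HANDLED;
    }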
fsl-edma-common.c:
   48  u32 ch = fsl_chan->vchan.chan.chan_id;  in fsl_edma_enable_request()
   65  u32 ch = fsl_chan->vchan.chan.chan_id;  in fsl_edma_disable_request()
  109  u32 ch = fsl_chan->vchan.chan.chan_id;  in fsl_edma_chan_mux()
  116  ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;  in fsl_edma_chan_mux()
  166  spin_lock_irqsave(&fsl_chan->vchan.lock, flags);  in fsl_edma_terminate_all()
  170  vchan_get_all_descriptors(&fsl_chan->vchan, &head);  in fsl_edma_terminate_all()
  171  spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);  in fsl_edma_terminate_all()
  172  vchan_dma_desc_free_list(&fsl_chan->vchan, &head);  in fsl_edma_terminate_all()
  182  spin_lock_irqsave(&fsl_chan->vchan.lock, flags);  in fsl_edma_pause()
  188  spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);  in fsl_edma_pause()
  [all …]
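fsl_edma_terminate_all() is the canonical virt-dma teardown, repeated by stm32-dma, axi-dmac, fsl-qdma, and others below: stop the hardware and collect every descriptor under vchan.lock with vchan_get_all_descriptors(), then free the collected list outside the lock with vchan_dma_desc_free_list(). Sketch, foo_* hypothetical:

    static int foo_terminate_all(struct dma_chan *chan)
    {
        struct foo_chan *fchan = to_foo_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&fchan->vchan.lock, flags);
        foo_hw_disable(fchan);              /* hypothetical: stop the channel */
        fchan->desc = NULL;                 /* forget the in-flight descriptor */
        vchan_get_all_descriptors(&fchan->vchan, &head);
        spin_unlock_irqrestore(&fchan->vchan.lock, flags);

        /* Free outside the lock, as the drivers above do. */
        vchan_dma_desc_free_list(&fchan->vchan, &head);
        return 0;
    }

The matching device_synchronize callback is typically a one-line vchan_synchronize(), visible in the fsl-edma, stm32-dma, and edma hits below.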
idma64.c:
  107  struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);  in idma64_stop_transfer()
  114  struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);  in idma64_start_transfer()
  118  vdesc = vchan_next_desc(&idma64c->vchan);  in idma64_start_transfer()
  142  spin_lock(&idma64c->vchan.lock);  in idma64_chan_irq()
  159  spin_unlock(&idma64c->vchan.lock);  in idma64_chan_irq()
  326  return vchan_tx_prep(&idma64c->vchan, &desc->vdesc, flags);  in idma64_prep_slave_sg()
  334  spin_lock_irqsave(&idma64c->vchan.lock, flags);  in idma64_issue_pending()
  335  if (vchan_issue_pending(&idma64c->vchan) && !idma64c->desc)  in idma64_issue_pending()
  337  spin_unlock_irqrestore(&idma64c->vchan.lock, flags);  in idma64_issue_pending()
  378  spin_lock_irqsave(&idma64c->vchan.lock, flags);  in idma64_tx_status()
  [all …]
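idma64_issue_pending() shows the standard issue_pending shape, also visible in the jz4780 and hsu hits: vchan_issue_pending() splices desc_submitted onto desc_issued and returns true if anything is now issued, and the hardware is kicked only when the channel is idle. Sketch:

    static void foo_issue_pending(struct dma_chan *chan)
    {
        struct foo_chan *fchan = to_foo_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&fchan->vchan.lock, flags);
        /* Move submitted -> issued; start only when nothing is running. */
        if (vchan_issue_pending(&fchan->vchan) && !fchan->desc)
            foo_start_transfer(fchan);      /* hypothetical HW kick */
        spin_unlock_irqrestore(&fchan->vchan.lock, flags);
    }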
dma-jz4780.c:
  129  struct virt_dma_chan vchan;  (member)
  166  return container_of(chan, struct jz4780_dma_chan, vchan.chan);  in to_jz4780_dma_chan()
  178  return container_of(jzchan->vchan.chan.device, struct jz4780_dma_dev,  in jz4780_dma_chan_parent()
  395  return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);  in jz4780_dma_prep_slave_sg()
  447  return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);  in jz4780_dma_prep_dma_cyclic()
  475  return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);  in jz4780_dma_prep_dma_memcpy()
  486  vdesc = vchan_next_desc(&jzchan->vchan);  in jz4780_dma_begin()
  558  spin_lock_irqsave(&jzchan->vchan.lock, flags);  in jz4780_dma_issue_pending()
  560  if (vchan_issue_pending(&jzchan->vchan) && !jzchan->desc)  in jz4780_dma_issue_pending()
  563  spin_unlock_irqrestore(&jzchan->vchan.lock, flags);  in jz4780_dma_issue_pending()
  [all …]
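All three jz4780 prep hits end the same way: the freshly built driver descriptor is handed to vchan_tx_prep(), which ties it to the dmaengine cookie machinery; the cookie is assigned later, at submit time. A memcpy-flavoured sketch, where foo_desc is assumed to embed a struct virt_dma_desc named vdesc:

    static struct dma_async_tx_descriptor *
    foo_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
                        size_t len, unsigned long flags)
    {
        struct foo_chan *fchan = to_foo_chan(chan);
        struct foo_desc *desc;

        desc = kzalloc(sizeof(*desc), GFP_NOWAIT);  /* prep can run atomically */
        if (!desc)
            return NULL;

        /* ... fill the hardware descriptor for a dst <- src copy of len ... */

        return vchan_tx_prep(&fchan->vchan, &desc->vdesc, flags);
    }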
dma-axi-dmac.c:
  120  struct virt_dma_chan vchan;  (member)
  152  return container_of(chan->vchan.chan.device, struct axi_dmac,  in chan_to_axi_dmac()
  158  return container_of(c, struct axi_dmac_chan, vchan.chan);  in to_axi_dmac_chan()
  219  vdesc = vchan_next_desc(&chan->vchan);  in axi_dmac_start_transfer()
  419  spin_lock(&dmac->chan.vchan.lock);  in axi_dmac_interrupt_handler()
  430  spin_unlock(&dmac->chan.vchan.lock);  in axi_dmac_interrupt_handler()
  442  spin_lock_irqsave(&chan->vchan.lock, flags);  in axi_dmac_terminate_all()
  445  vchan_get_all_descriptors(&chan->vchan, &head);  in axi_dmac_terminate_all()
  447  spin_unlock_irqrestore(&chan->vchan.lock, flags);  in axi_dmac_terminate_all()
  449  vchan_dma_desc_free_list(&chan->vchan, &head);  in axi_dmac_terminate_all()
  [all …]
stm32-mdma.c:
   258  struct virt_dma_chan vchan;  (member)
   285  return container_of(chan->vchan.chan.device, struct stm32_mdma_device,  in stm32_mdma_get_dev()
   291  return container_of(c, struct stm32_mdma_chan, vchan.chan);  in to_stm32_mdma_chan()
   301  return &chan->vchan.chan.dev->device;  in chan2dev()
   808  return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);  in stm32_mdma_prep_slave_sg()
   899  return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);  in stm32_mdma_prep_dma_cyclic()
  1085  return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);  in stm32_mdma_prep_dma_memcpy()
  1122  vdesc = vchan_next_desc(&chan->vchan);  in stm32_mdma_start_transfer()
  1163  dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);  in stm32_mdma_start_transfer()
  1171  spin_lock_irqsave(&chan->vchan.lock, flags);  in stm32_mdma_issue_pending()
  [all …]
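to_stm32_mdma_chan() and chan2dev() above are the standard embedding helpers: the driver channel embeds a virt_dma_chan, which in turn embeds the core dma_chan, so container_of() walks back out to the outer structure. Sketch with hypothetical foo_* names:

    struct foo_chan {
        struct virt_dma_chan vchan;     /* embeds struct dma_chan chan */
        struct foo_desc *desc;          /* currently running descriptor */
    };

    static inline struct foo_chan *to_foo_chan(struct dma_chan *c)
    {
        return container_of(c, struct foo_chan, vchan.chan);
    }

    static inline struct device *chan2dev(struct foo_chan *chan)
    {
        return &chan->vchan.chan.dev->device;
    }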
stm32-dma.c:
  199  struct virt_dma_chan vchan;  (member)
  223  return container_of(chan->vchan.chan.device, struct stm32_dma_device,  in stm32_dma_get_dev()
  229  return container_of(c, struct stm32_dma_chan, vchan.chan);  in to_stm32_dma_chan()
  239  return &chan->vchan.chan.dev->device;  in chan2dev()
  496  spin_lock_irqsave(&chan->vchan.lock, flags);  in stm32_dma_terminate_all()
  505  vchan_get_all_descriptors(&chan->vchan, &head);  in stm32_dma_terminate_all()
  506  spin_unlock_irqrestore(&chan->vchan.lock, flags);  in stm32_dma_terminate_all()
  507  vchan_dma_desc_free_list(&chan->vchan, &head);  in stm32_dma_terminate_all()
  516  vchan_synchronize(&chan->vchan);  in stm32_dma_synchronize()
  553  vdesc = vchan_next_desc(&chan->vchan);  in stm32_dma_start_transfer()
  [all …]
fsl-qdma.c:
   177  struct virt_dma_chan vchan;  (member)
   297  return container_of(chan, struct fsl_qdma_chan, vchan.chan);  in to_fsl_qdma_chan()
   314  spin_lock_irqsave(&fsl_chan->vchan.lock, flags);  in fsl_qdma_free_chan_resources()
   315  vchan_get_all_descriptors(&fsl_chan->vchan, &head);  in fsl_qdma_free_chan_resources()
   316  spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);  in fsl_qdma_free_chan_resources()
   318  vchan_dma_desc_free_list(&fsl_chan->vchan, &head);  in fsl_qdma_free_chan_resources()
   731  spin_lock(&fsl_comp->qchan->vchan.lock);  in fsl_qdma_queue_transfer_complete()
   734  spin_unlock(&fsl_comp->qchan->vchan.lock);  in fsl_qdma_queue_transfer_complete()
   987  return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);  in fsl_qdma_prep_memcpy()
  1001  vdesc = vchan_next_desc(&fsl_chan->vchan);  in fsl_qdma_enqueue_desc()
  [all …]
st_fdma.h:
  125  struct virt_dma_chan vchan;  (member)
  186  + (fchan)->vchan.chan.chan_id * 0x4 \
  191  + (fchan)->vchan.chan.chan_id * 0x4 \
  208  + (fchan)->vchan.chan.chan_id * FDMA_NODE_SZ \
  213  + (fchan)->vchan.chan.chan_id * FDMA_NODE_SZ \
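The st_fdma.h hits derive per-channel register addresses from the dmaengine chan_id embedded in vchan.chan. A hedged one-macro sketch of the same stride addressing; the FOO_* name, the fdev->io_base field, and the 0x4 stride are illustrative only:

    /* Per-channel register: I/O base + block offset + chan_id * stride. */
    #define FOO_CH_REG(fchan, off)                      \
        ((fchan)->fdev->io_base + (off)                 \
         + (fchan)->vchan.chan.chan_id * 0x4)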
fsl-edma.c:
   27  vchan_synchronize(&fsl_chan->vchan);  in fsl_edma_synchronize()
   47  spin_lock(&fsl_chan->vchan.lock);  in fsl_edma_tx_handler()
   51  spin_unlock(&fsl_chan->vchan.lock);  in fsl_edma_tx_handler()
   68  spin_unlock(&fsl_chan->vchan.lock);  in fsl_edma_tx_handler()
  360  fsl_chan->vchan.desc_free = fsl_edma_free_desc;  in fsl_edma_probe()
  361  vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);  in fsl_edma_probe()
  443  spin_lock_irqsave(&fsl_chan->vchan.lock, flags);  in fsl_edma_suspend_late()
  452  spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);  in fsl_edma_suspend_late()
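The fsl_edma_probe() hits (mirrored by mcf-edma below) show per-channel registration: install the vchan.desc_free callback, then call vchan_init() to set up the lock and descriptor lists and link the channel into the dma_device. Sketch, foo_* hypothetical:

    static void foo_desc_free(struct virt_dma_desc *vd)
    {
        /* vd is embedded in our descriptor; free the whole thing. */
        kfree(container_of(vd, struct foo_desc, vdesc));
    }

    static void foo_init_one_chan(struct foo_chan *fchan, struct dma_device *dd)
    {
        fchan->vchan.desc_free = foo_desc_free; /* invoked by the virt-dma core */
        vchan_init(&fchan->vchan, dd);          /* lists, lock, dd->channels */
    }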
pxa_dma.c:
  100  struct pxad_chan *vchan;  (member)
  151  dev_vdbg(&phy->vchan->vc.chan.dev->device, \
  159  dev_vdbg(&phy->vchan->vc.chan.dev->device, \
  166  dev_vdbg(&phy->vchan->vc.chan.dev->device, \
  390  if (!phy->vchan) {  in lookup_phy()
  391  phy->vchan = pchan;  in lookup_phy()
  425  chan->phy->vchan = NULL;  in pxad_free_phy()
  455  if (!phy->vchan)  in phy_enable()
  458  dev_dbg(&phy->vchan->vc.chan.dev->device,  in phy_enable()
  462  pdev = to_pxad_dev(phy->vchan->vc.chan.device);  in phy_enable()
  [all …]
mcf-edma.c:
   37  spin_lock(&mcf_chan->vchan.lock);  in mcf_edma_tx_handler()
   41  spin_unlock(&mcf_chan->vchan.lock);  in mcf_edma_tx_handler()
   58  spin_unlock(&mcf_chan->vchan.lock);  in mcf_edma_tx_handler()
  231  mcf_chan->vchan.desc_free = fsl_edma_free_desc;  in mcf_edma_probe()
  232  vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);  in mcf_edma_probe()
mmp_pdma.c:
  119  struct mmp_pdma_chan *vchan;  (member)
  155  if (!phy->vchan)  in enable_chan()
  158  reg = DRCMR(phy->vchan->drcmr);  in enable_chan()
  162  if (phy->vchan->byte_align)  in enable_chan()
  195  if ((dcsr & DCSR_BUSERR) && (phy->vchan))  in clear_chan_irq()
  196  dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");  in clear_chan_irq()
  208  tasklet_schedule(&phy->vchan->tasklet);  in mmp_pdma_chan_handler()
  260  if (!phy->vchan) {  in lookup_phy()
  261  phy->vchan = pchan;  in lookup_phy()
  287  pchan->phy->vchan = NULL;  in mmp_pdma_free_phy()
/drivers/dma/sf-pdma/
sf-pdma.c:
   44  return container_of(dchan, struct sf_pdma_chan, vchan.chan);  in to_sf_pdma_chan()
  100  desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);  in sf_pdma_prep_dma_memcpy()
  102  spin_lock_irqsave(&chan->vchan.lock, iflags);  in sf_pdma_prep_dma_memcpy()
  104  spin_unlock_irqrestore(&chan->vchan.lock, iflags);  in sf_pdma_prep_dma_memcpy()
  143  spin_lock_irqsave(&chan->vchan.lock, flags);  in sf_pdma_free_chan_resources()
  147  vchan_get_all_descriptors(&chan->vchan, &head);  in sf_pdma_free_chan_resources()
  149  spin_unlock_irqrestore(&chan->vchan.lock, flags);  in sf_pdma_free_chan_resources()
  150  vchan_dma_desc_free_list(&chan->vchan, &head);  in sf_pdma_free_chan_resources()
  163  spin_lock_irqsave(&chan->vchan.lock, flags);  in sf_pdma_desc_residue()
  165  list_for_each_entry(vd, &chan->vchan.desc_submitted, node)  in sf_pdma_desc_residue()
  [all …]
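sf_pdma_desc_residue() computes the residue by walking vchan.desc_submitted under the lock and adding what the hardware still owes on the running descriptor. Sketch, with to_foo_desc() and foo_hw_bytes_left() hypothetical:

    static size_t foo_desc_residue(struct foo_chan *chan)
    {
        struct virt_dma_desc *vd;
        unsigned long flags;
        size_t residue = 0;

        spin_lock_irqsave(&chan->vchan.lock, flags);

        /* Bytes queued behind the running descriptor... */
        list_for_each_entry(vd, &chan->vchan.desc_submitted, node)
            residue += to_foo_desc(vd)->len;

        /* ...plus what the hardware reports for the one in flight. */
        residue += foo_hw_bytes_left(chan);

        spin_unlock_irqrestore(&chan->vchan.lock, flags);
        return residue;
    }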
/drivers/dma/hsu/
hsu.c:
  113  vdesc = vchan_next_desc(&hsuc->vchan);  in hsu_dma_start_transfer()
  160  spin_lock_irqsave(&hsuc->vchan.lock, flags);  in hsu_dma_get_status()
  162  spin_unlock_irqrestore(&hsuc->vchan.lock, flags);  in hsu_dma_get_status()
  214  stat = this_cpu_ptr(hsuc->vchan.chan.local);  in hsu_dma_do_irq()
  216  spin_lock_irqsave(&hsuc->vchan.lock, flags);  in hsu_dma_do_irq()
  230  spin_unlock_irqrestore(&hsuc->vchan.lock, flags);  in hsu_dma_do_irq()
  287  return vchan_tx_prep(&hsuc->vchan, &desc->vdesc, flags);  in hsu_dma_prep_slave_sg()
  295  spin_lock_irqsave(&hsuc->vchan.lock, flags);  in hsu_dma_issue_pending()
  296  if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc)  in hsu_dma_issue_pending()
  298  spin_unlock_irqrestore(&hsuc->vchan.lock, flags);  in hsu_dma_issue_pending()
  [all …]
hsu.h:
   83  struct virt_dma_chan vchan;  (member)
   96  return container_of(chan, struct hsu_dma_chan, vchan.chan);  in to_hsu_dma_chan()
/drivers/dma/lgm/
lgm-dma.c:
  195  struct virt_dma_chan vchan;  (member)
  292  return container_of(chan, struct ldma_chan, vchan.chan);  in to_ldma_chan()
  521  struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);  in ldma_chan_cctrl_cfg()
  550  struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);  in ldma_chan_irq_init()
  578  struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);  in ldma_chan_set_class()
  596  struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);  in ldma_chan_on()
  615  struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);  in ldma_chan_off()
  638  struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);  in ldma_chan_desc_hw_cfg()
  662  struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);  in ldma_chan_desc_cfg()
  696  struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);  in ldma_chan_reset()
  [all …]
/drivers/staging/ralink-gdma/
ralink-gdma.c:
  107  struct virt_dma_chan vchan;  (member)
  140  return container_of(chan->vchan.chan.device, struct gdma_dma_dev,  in gdma_dma_chan_get_dev()
  146  return container_of(c, struct gdma_dmaengine_chan, vchan.chan);  in to_gdma_dma_chan()
  227  spin_lock_irqsave(&chan->vchan.lock, flags);  in gdma_dma_terminate_all()
  230  vchan_get_all_descriptors(&chan->vchan, &head);  in gdma_dma_terminate_all()
  231  spin_unlock_irqrestore(&chan->vchan.lock, flags);  in gdma_dma_terminate_all()
  233  vchan_dma_desc_free_list(&chan->vchan, &head);  in gdma_dma_terminate_all()
  416  vdesc = vchan_next_desc(&chan->vchan);  in gdma_next_desc()
  435  spin_lock_irqsave(&chan->vchan.lock, flags);  in gdma_dma_chan_irq()
  459  spin_unlock_irqrestore(&chan->vchan.lock, flags);  in gdma_dma_chan_irq()
  [all …]
/drivers/staging/mt7621-dma/
hsdma-mt7621.c:
  144  struct virt_dma_chan vchan;  (member)
  168  return container_of(chan->vchan.chan.device, struct mtk_hsdam_engine,  in mtk_hsdma_chan_get_dev()
  174  return container_of(c, struct mtk_hsdma_chan, vchan.chan);  in to_mtk_hsdma_chan()
  290  spin_lock_bh(&chan->vchan.lock);  in mtk_hsdma_terminate_all()
  293  vchan_get_all_descriptors(&chan->vchan, &head);  in mtk_hsdma_terminate_all()
  294  spin_unlock_bh(&chan->vchan.lock);  in mtk_hsdma_terminate_all()
  296  vchan_dma_desc_free_list(&chan->vchan, &head);  in mtk_hsdma_terminate_all()
  389  vdesc = vchan_next_desc(&chan->vchan);  in gdma_next_desc()
  407  spin_lock_bh(&chan->vchan.lock);  in mtk_hsdma_chan_done()
  421  spin_unlock_bh(&chan->vchan.lock);  in mtk_hsdma_chan_done()
  [all …]
/drivers/dma/fsl-dpaa2-qdma/
dpaa2-qdma.c:
   21  return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);  in to_dpaa2_qdma_chan()
   73  spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);  in dpaa2_qdma_free_chan_resources()
   74  vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);  in dpaa2_qdma_free_chan_resources()
   75  spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);  in dpaa2_qdma_free_chan_resources()
   77  vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);  in dpaa2_qdma_free_chan_resources()
  269  return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);  in dpaa2_qdma_prep_memcpy()
  282  spin_lock(&dpaa2_chan->vchan.lock);  in dpaa2_qdma_issue_pending()
  283  if (vchan_issue_pending(&dpaa2_chan->vchan)) {  in dpaa2_qdma_issue_pending()
  284  vdesc = vchan_next_desc(&dpaa2_chan->vchan);  in dpaa2_qdma_issue_pending()
  301  spin_unlock(&dpaa2_chan->vchan.lock);  in dpaa2_qdma_issue_pending()
  [all …]
/drivers/dma/xilinx/
xilinx_dpdma.c:
   225  struct virt_dma_chan vchan;  (member)
   247  container_of(_chan, struct xilinx_dpdma_chan, vchan.chan)
   845  vdesc = vchan_next_desc(&chan->vchan);  in xilinx_dpdma_chan_queue_transfer()
  1175  list_empty(&chan->vchan.desc_issued)) {  in xilinx_dpdma_chan_handle_err()
  1178  &chan->vchan.desc_issued);  in xilinx_dpdma_chan_handle_err()
  1212  vchan_tx_prep(&chan->vchan, &desc->vdesc, flags | DMA_CTRL_ACK);  in xilinx_dpdma_prep_interleaved_dma()
  1255  vchan_free_chan_resources(&chan->vchan);  in xilinx_dpdma_free_chan_resources()
  1266  spin_lock_irqsave(&chan->vchan.lock, flags);  in xilinx_dpdma_issue_pending()
  1267  if (vchan_issue_pending(&chan->vchan))  in xilinx_dpdma_issue_pending()
  1269  spin_unlock_irqrestore(&chan->vchan.lock, flags);  in xilinx_dpdma_issue_pending()
  [all …]
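The xilinx_dpdma_chan_handle_err() hits show an error-recovery twist: if nothing else is issued, the failed in-flight descriptor is put back on vchan.desc_issued so it is retried when the channel restarts. A hedged sketch of that idea, foo_* hypothetical:

    /* Caller holds chan->vchan.lock; chan->desc is the failed descriptor. */
    static void foo_handle_err(struct foo_chan *chan)
    {
        lockdep_assert_held(&chan->vchan.lock);

        if (chan->desc && list_empty(&chan->vchan.desc_issued))
            list_add_tail(&chan->desc->vdesc.node,
                          &chan->vchan.desc_issued);
        chan->desc = NULL;
    }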
/drivers/dma/ti/
edma.c:
   225  struct virt_dma_chan vchan;  (member)
   761  return container_of(c, struct edma_chan, vchan.chan);  in to_edma_chan()
   780  struct device *dev = echan->vchan.chan.device->dev;  in edma_execute()
   785  vdesc = vchan_next_desc(&echan->vchan);  in edma_execute()
   874  spin_lock_irqsave(&echan->vchan.lock, flags);  in edma_terminate_all()
   891  vchan_get_all_descriptors(&echan->vchan, &head);  in edma_terminate_all()
   892  spin_unlock_irqrestore(&echan->vchan.lock, flags);  in edma_terminate_all()
   893  vchan_dma_desc_free_list(&echan->vchan, &head);  in edma_terminate_all()
   902  vchan_synchronize(&echan->vchan);  in edma_synchronize()
  1159  return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);  in edma_prep_slave_sg()
  [all …]