Searched refs: async_tx (Results 1 – 22 of 22), sorted by relevance

/drivers/dma/
mmp_pdma.c:84 struct dma_async_tx_descriptor async_tx; member
132 container_of(tx, struct mmp_pdma_desc_sw, async_tx)
333 set_desc(chan->phy, desc->async_tx.phys); in start_pending_queue()
351 cookie = dma_cookie_assign(&child->async_tx); in mmp_pdma_tx_submit()
375 dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); in mmp_pdma_alloc_descriptor()
377 desc->async_tx.tx_submit = mmp_pdma_tx_submit; in mmp_pdma_alloc_descriptor()
378 desc->async_tx.phys = pdesc; in mmp_pdma_alloc_descriptor()
421 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); in mmp_pdma_free_desc_list()
486 prev->desc.ddadr = new->async_tx.phys; in mmp_pdma_prep_memcpy()
488 new->async_tx.cookie = 0; in mmp_pdma_prep_memcpy()
[all …]
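
The mmp_pdma hits above illustrate the core pattern behind nearly every result in this list: the driver embeds a struct dma_async_tx_descriptor inside its own software descriptor, initializes it with dma_async_tx_descriptor_init(), points tx_submit at its own hook, and recovers the wrapper with container_of(). A minimal hedged sketch of that pattern, using hypothetical names (struct my_desc_sw, my_tx_submit, my_desc_init) rather than any one driver's:

#include <linux/dmaengine.h>
#include <linux/list.h>
#include "dmaengine.h"  /* drivers/dma private helpers: dma_cookie_assign() */

struct my_desc_sw {                              /* hypothetical driver descriptor */
        struct dma_async_tx_descriptor async_tx; /* embedded generic descriptor */
        struct list_head node;                   /* linkage on channel queues */
        struct list_head tx_list;                /* child chunks of a chained transfer */
};

#define tx_to_my_desc(tx) container_of(tx, struct my_desc_sw, async_tx)

static dma_cookie_t my_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct my_desc_sw *desc = tx_to_my_desc(tx);

        /* a real driver queues desc on its pending list under the channel lock */
        return dma_cookie_assign(&desc->async_tx);
}

static void my_desc_init(struct my_desc_sw *desc, struct dma_chan *chan,
                         dma_addr_t pdesc)
{
        dma_async_tx_descriptor_init(&desc->async_tx, chan);
        desc->async_tx.tx_submit = my_tx_submit;
        desc->async_tx.phys = pdesc;    /* bus address of the hardware descriptor */
}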
mv_xor_v2.c:183 struct dma_async_tx_descriptor async_tx; member
308 container_of(tx, struct mv_xor_v2_sw_desc, async_tx); in mv_xor_v2_tx_submit()
314 __func__, sw_desc, &sw_desc->async_tx); in mv_xor_v2_tx_submit()
355 if (async_tx_test_ack(&sw_desc->async_tx)) { in mv_xor_v2_prep_sw_desc()
395 sw_desc->async_tx.flags = flags; in mv_xor_v2_prep_dma_memcpy()
424 return &sw_desc->async_tx; in mv_xor_v2_prep_dma_memcpy()
451 sw_desc->async_tx.flags = flags; in mv_xor_v2_prep_dma_xor()
483 return &sw_desc->async_tx; in mv_xor_v2_prep_dma_xor()
513 return &sw_desc->async_tx; in mv_xor_v2_prep_dma_interrupt()
578 if (next_pending_sw_desc->async_tx.cookie > 0) { in mv_xor_v2_tasklet()
[all …]
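
mv_xor_v2.c:355 shows the recycling idiom that also appears in fsldma.c:494 and rcar-dmac.c:599: a descriptor may only be reused once the client has set DMA_CTRL_ACK on it. A hedged sketch, reusing the hypothetical struct my_desc_sw from the first example:

/* Scan a free list for a descriptor the client has already acknowledged;
 * async_tx_test_ack() just checks DMA_CTRL_ACK in tx->flags. */
static struct my_desc_sw *my_get_acked_desc(struct list_head *free_list)
{
        struct my_desc_sw *desc;

        list_for_each_entry(desc, free_list, node) {
                if (async_tx_test_ack(&desc->async_tx)) {
                        list_del(&desc->node);
                        return desc;
                }
        }
        return NULL;    /* nothing acked yet: allocate a fresh one or retry */
}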
mv_xor.c:43 container_of(tx, struct mv_xor_desc_slot, async_tx)
185 mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); in mv_chan_start_new_chain()
196 BUG_ON(desc->async_tx.cookie < 0); in mv_desc_run_tx_complete_actions()
198 if (desc->async_tx.cookie > 0) { in mv_desc_run_tx_complete_actions()
199 cookie = desc->async_tx.cookie; in mv_desc_run_tx_complete_actions()
201 dma_descriptor_unmap(&desc->async_tx); in mv_desc_run_tx_complete_actions()
205 dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL); in mv_desc_run_tx_complete_actions()
209 dma_run_dependencies(&desc->async_tx); in mv_desc_run_tx_complete_actions()
223 if (async_tx_test_ack(&iter->async_tx)) { in mv_chan_clean_completed_slots()
239 __func__, __LINE__, desc, desc->async_tx.flags); in mv_desc_clean_slot()
[all …]
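
The mv_xor completion path above (lines 196-209) is the canonical async_tx completion sequence: check the cookie, unmap, invoke the client callback, then release any dependent transactions. A hedged sketch of that sequence, using the modern dma_cookie_complete() helper in place of mv_xor's manual cookie bookkeeping:

/* Completion actions for one finished descriptor (channel lock held). */
static dma_cookie_t my_run_tx_complete_actions(struct my_desc_sw *desc)
{
        struct dma_async_tx_descriptor *txd = &desc->async_tx;
        dma_cookie_t cookie = 0;

        BUG_ON(txd->cookie < 0);        /* submitted descriptors only */
        if (txd->cookie > 0) {
                cookie = txd->cookie;
                dma_cookie_complete(txd);       /* records chan->completed_cookie */
                dma_descriptor_unmap(txd);
                dmaengine_desc_get_callback_invoke(txd, NULL);
        }

        /* start any transactions the async_tx API chained after this one */
        dma_run_dependencies(txd);
        return cookie;
}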
fsldma.c:397 set_desc_next(chan, &tail->hw, desc->async_tx.phys); in append_ld_queue()
429 cookie = dma_cookie_assign(&child->async_tx); in fsl_dma_tx_submit()
450 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); in fsl_dma_free_descriptor()
471 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); in fsl_dma_alloc_descriptor()
472 desc->async_tx.tx_submit = fsl_dma_tx_submit; in fsl_dma_alloc_descriptor()
473 desc->async_tx.phys = pdesc; in fsl_dma_alloc_descriptor()
494 if (async_tx_test_ack(&desc->async_tx)) in fsldma_clean_completed_descriptor()
510 struct dma_async_tx_descriptor *txd = &desc->async_tx; in fsldma_run_tx_complete_actions()
548 if (!async_tx_test_ack(&desc->async_tx)) { in fsldma_clean_running_descriptor()
557 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); in fsldma_clean_running_descriptor()
[all …]
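
fsl_dma_tx_submit() (line 429) and mmp_pdma_tx_submit() (line 351) both assign a cookie to every child of a chained transfer and return the last one, so the client's cookie completes only when the whole chain has. A hedged sketch, assuming the chunks sit on the head descriptor's tx_list as in fsldma:

static dma_cookie_t my_chain_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct my_desc_sw *desc = tx_to_my_desc(tx), *child;
        dma_cookie_t cookie = -EINVAL;

        /* every chunk gets a cookie; the last assigned one is returned */
        list_for_each_entry(child, &desc->tx_list, node)
                cookie = dma_cookie_assign(&child->async_tx);

        return cookie;
}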
altera-msgdma.c:161 struct dma_async_tx_descriptor async_tx; member
202 #define tx_to_desc(tx) container_of(tx, struct msgdma_sw_desc, async_tx)
371 async_tx_ack(&first->async_tx); in msgdma_prep_memcpy()
372 first->async_tx.flags = flags; in msgdma_prep_memcpy()
374 return &first->async_tx; in msgdma_prep_memcpy()
456 first->async_tx.flags = flags; in msgdma_prep_slave_sg()
458 return &first->async_tx; in msgdma_prep_slave_sg()
592 dmaengine_desc_get_callback(&desc->async_tx, &cb); in msgdma_chan_desc_cleanup()
617 dma_cookie_complete(&desc->async_tx); in msgdma_complete_descriptor()
670 dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan); in msgdma_alloc_chan_resources()
[all …]
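
msgdma_prep_memcpy() (lines 371-374) shows what every prep_* callback in this list ultimately does: stash the client's flags in the embedded descriptor and return its address, so the dmaengine core can later invoke ->tx_submit() on it. A hedged sketch of the tail of such a callback (my_alloc_desc is a hypothetical allocator):

static struct my_desc_sw *my_alloc_desc(struct dma_chan *dchan);        /* hypothetical */

static struct dma_async_tx_descriptor *
my_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
                   size_t len, unsigned long flags)
{
        struct my_desc_sw *first = my_alloc_desc(dchan);

        if (!first)
                return NULL;

        /* ... program the hardware descriptor(s) for dst/src/len here ... */

        first->async_tx.flags = flags;  /* DMA_PREP_INTERRUPT, DMA_CTRL_ACK, ... */
        return &first->async_tx;        /* the core submits through this */
}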
fsl_raid.c:84 #define to_fsl_re_dma_desc(tx) container_of(tx, struct fsl_re_desc, async_tx)
138 dma_cookie_complete(&desc->async_tx); in fsl_re_desc_done()
139 dma_descriptor_unmap(&desc->async_tx); in fsl_re_desc_done()
140 dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL); in fsl_re_desc_done()
150 if (async_tx_test_ack(&desc->async_tx)) in fsl_re_cleanup_descs()
255 desc->async_tx.tx_submit = fsl_re_tx_submit; in fsl_re_init_desc()
256 dma_async_tx_descriptor_init(&desc->async_tx, &re_chan->chan); in fsl_re_init_desc()
288 desc->async_tx.flags = flags; in fsl_re_chan_alloc_desc()
305 desc->async_tx.flags = flags; in fsl_re_chan_alloc_desc()
383 return &desc->async_tx; in fsl_re_prep_dma_genq()
[all …]
nbpfaxi.c:151 struct dma_async_tx_descriptor async_tx; member
636 running = chan->running ? chan->running->async_tx.cookie : -EINVAL; in nbpf_tx_status()
647 if (desc->async_tx.cookie == cookie) { in nbpf_tx_status()
654 if (desc->async_tx.cookie == cookie) { in nbpf_tx_status()
674 struct nbpf_desc *desc = container_of(tx, struct nbpf_desc, async_tx); in nbpf_tx_submit()
722 dma_async_tx_descriptor_init(&desc->async_tx, dchan); in nbpf_desc_page_alloc()
723 desc->async_tx.tx_submit = nbpf_tx_submit; in nbpf_desc_page_alloc()
764 if (async_tx_test_ack(&desc->async_tx) && desc->user_wait) { in nbpf_scan_acked()
857 __func__, desc, desc->async_tx.cookie); in nbpf_chan_idle()
955 desc->async_tx.flags = flags; in nbpf_prep_sg()
[all …]
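
nbpf_tx_status() (lines 636-654) resolves a cookie back to a descriptor by walking the channel's queues and comparing desc->async_tx.cookie, so it can report a per-descriptor residue. A hedged sketch of the same lookup, with a hypothetical channel wrapper and a hypothetical my_desc_residue() helper:

struct my_chan {                        /* hypothetical channel wrapper */
        struct dma_chan common;
        struct list_head active_list;
};
#define to_my_chan(c) container_of(c, struct my_chan, common)

static u32 my_desc_residue(struct my_desc_sw *desc);    /* hypothetical: bytes left */

static enum dma_status my_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
                                    struct dma_tx_state *txstate)
{
        struct my_chan *chan = to_my_chan(dchan);
        enum dma_status status = dma_cookie_status(dchan, cookie, txstate);
        struct my_desc_sw *desc;

        if (status == DMA_COMPLETE || !txstate)
                return status;

        /* match the queried cookie against in-flight descriptors */
        list_for_each_entry(desc, &chan->active_list, node) {
                if (desc->async_tx.cookie == cookie) {
                        dma_set_residue(txstate, my_desc_residue(desc));
                        break;
                }
        }
        return status;
}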
fsldma.h:104 struct dma_async_tx_descriptor async_tx; member
192 #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
mv_xor.h:147 struct dma_async_tx_descriptor async_tx; member
fsl_raid.h:294 struct dma_async_tx_descriptor async_tx; member
Kconfig:766 bool "Async_tx: Offload support for the async_tx api"
769 This allows the async_tx api to take advantage of offload engines for
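
The Kconfig entry is the client side of everything above: with ASYNC_TX_DMA enabled, users of the async_tx API (notably raid5/raid6) reach these offload engines transparently. A hedged sketch of a minimal async_tx client, using the real async_memcpy()/init_async_submit() API with a hypothetical callback:

#include <linux/async_tx.h>
#include <linux/completion.h>

static void my_copy_done(void *param)           /* dma_async_tx_callback */
{
        complete(param);
}

/* Copy one page via an offload engine if one is available; otherwise the
 * async_tx core falls back to a synchronous memcpy. */
static void my_offload_copy(struct page *dst, struct page *src,
                            struct completion *done)
{
        struct async_submit_ctl submit;

        init_async_submit(&submit, ASYNC_TX_ACK, NULL, my_copy_done, done, NULL);
        async_memcpy(dst, src, 0, 0, PAGE_SIZE, &submit);
        async_tx_issue_pending_all();           /* kick the channels */
}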
pxa_dma.c:137 container_of(tx, struct pxad_desc_sw, async_tx)
/drivers/dma/sh/
shdma-base.c:73 container_of(tx, struct shdma_desc, async_tx); in shdma_tx_submit()
92 chunk->async_tx.cookie > 0 || in shdma_tx_submit()
93 chunk->async_tx.cookie == -EBUSY || in shdma_tx_submit()
98 chunk->async_tx.callback = callback; in shdma_tx_submit()
99 chunk->async_tx.callback_param = tx->callback_param; in shdma_tx_submit()
102 chunk->async_tx.callback = NULL; in shdma_tx_submit()
108 tx->cookie, &chunk->async_tx, schan->id); in shdma_tx_submit()
237 dma_async_tx_descriptor_init(&desc->async_tx, in shdma_alloc_chan_resources()
239 desc->async_tx.tx_submit = shdma_tx_submit; in shdma_alloc_chan_resources()
337 struct dma_async_tx_descriptor *tx = &desc->async_tx; in __ld_cleanup()
[all …]
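
shdma_tx_submit() (lines 92-102) splits one logical transfer into hardware chunks and moves the client callback onto the final chunk, silencing the intermediate ones. A hedged, simplified sketch of that propagation (the real code also juggles cookies and -EBUSY markers):

static void my_propagate_callback(struct dma_async_tx_descriptor *tx,
                                  struct list_head *chunks)
{
        struct my_desc_sw *chunk, *last = NULL;

        list_for_each_entry(chunk, chunks, node) {
                chunk->async_tx.callback = NULL;        /* intermediate chunks stay silent */
                last = chunk;
        }
        if (last) {
                /* only the final chunk notifies the client */
                last->async_tx.callback = tx->callback;
                last->async_tx.callback_param = tx->callback_param;
        }
}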
rcar-dmac.c:73 struct dma_async_tx_descriptor async_tx; member
94 #define to_rcar_dmac_desc(d) container_of(d, struct rcar_dmac_desc, async_tx)
440 else if (desc->async_tx.callback) in rcar_dmac_chan_start_xfer()
544 dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); in rcar_dmac_desc_alloc()
545 desc->async_tx.tx_submit = rcar_dmac_tx_submit; in rcar_dmac_desc_alloc()
599 if (async_tx_test_ack(&desc->async_tx)) { in rcar_dmac_desc_recycle_acked()
950 desc->async_tx.flags = dma_flags; in rcar_dmac_chan_prep_sg()
951 desc->async_tx.cookie = -EBUSY; in rcar_dmac_chan_prep_sg()
1047 return &desc->async_tx; in rcar_dmac_chan_prep_sg()
1351 if (cookie != desc->async_tx.cookie) { in rcar_dmac_chan_get_residue()
[all …]
shdma.h:57 #define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
shdmac.c:289 sdesc->async_tx.cookie, sh_chan->shdma_chan.id, in sh_dmae_start_xfer()
/drivers/dma/ppc4xx/
adma.c:1466 BUG_ON(desc->async_tx.cookie < 0); in ppc440spe_adma_run_tx_complete_actions()
1467 if (desc->async_tx.cookie > 0) { in ppc440spe_adma_run_tx_complete_actions()
1468 cookie = desc->async_tx.cookie; in ppc440spe_adma_run_tx_complete_actions()
1469 desc->async_tx.cookie = 0; in ppc440spe_adma_run_tx_complete_actions()
1471 dma_descriptor_unmap(&desc->async_tx); in ppc440spe_adma_run_tx_complete_actions()
1475 dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL); in ppc440spe_adma_run_tx_complete_actions()
1479 dma_run_dependencies(&desc->async_tx); in ppc440spe_adma_run_tx_complete_actions()
1493 if (!async_tx_test_ack(&desc->async_tx)) in ppc440spe_adma_clean_slot()
1559 iter->async_tx.cookie, iter->idx, busy, iter->phys, in __ppc440spe_adma_slot_cleanup()
1561 async_tx_test_ack(&iter->async_tx)); in __ppc440spe_adma_slot_cleanup()
[all …]
adma.h:20 container_of(tx, struct ppc440spe_adma_desc_slot, async_tx)
147 struct dma_async_tx_descriptor async_tx; member
/drivers/dma/xilinx/
xilinx_dma.c:370 struct dma_async_tx_descriptor async_tx; member
527 container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
1033 dmaengine_desc_get_callback(&desc->async_tx, &cb); in xilinx_dma_chan_handle_cyclic()
1076 dmaengine_desc_get_callback_invoke(&desc->async_tx, &result); in xilinx_dma_chan_desc_cleanup()
1080 dma_run_dependencies(&desc->async_tx); in xilinx_dma_chan_desc_cleanup()
1499 head_desc->async_tx.phys); in xilinx_cdma_start_transfer()
1567 head_desc->async_tx.phys); in xilinx_dma_start_transfer()
1651 head_desc->async_tx.phys); in xilinx_mcdma_start_transfer()
1736 dma_cookie_complete(&desc->async_tx); in xilinx_dma_complete_descriptor()
1948 tail_segment->hw.next_desc = (u32)desc->async_tx.phys; in append_desc_queue()
[all …]
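
append_desc_queue() (line 1948), like append_ld_queue() in fsldma and mmp_pdma_prep_memcpy(), uses async_tx.phys for double duty: it is both the dma_pool address of the descriptor and the value written into the previous descriptor's hardware link field. A hedged sketch, with a hypothetical my_desc_set_next() standing in for the driver-specific link write:

static void my_desc_set_next(struct my_desc_sw *tail, dma_addr_t phys); /* hypothetical */

static void my_append_desc(struct list_head *pending, struct my_desc_sw *desc)
{
        struct my_desc_sw *tail;

        if (!list_empty(pending)) {
                tail = list_last_entry(pending, struct my_desc_sw, node);
                /* chain in hardware: point the old tail at the new descriptor */
                my_desc_set_next(tail, desc->async_tx.phys);
        }
        list_add_tail(&desc->node, pending);
}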
zynqmp_dma.c:146 async_tx)
183 struct dma_async_tx_descriptor async_tx; member
484 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); in zynqmp_dma_alloc_chan_resources()
485 desc->async_tx.tx_submit = zynqmp_dma_tx_submit; in zynqmp_dma_alloc_chan_resources()
612 dmaengine_desc_get_callback(&desc->async_tx, &cb); in zynqmp_dma_chan_desc_cleanup()
639 dma_cookie_complete(&desc->async_tx); in zynqmp_dma_complete_descriptor()
863 async_tx_ack(&first->async_tx); in zynqmp_dma_prep_memcpy()
864 first->async_tx.flags = (enum dma_ctrl_flags)flags; in zynqmp_dma_prep_memcpy()
865 return &first->async_tx; in zynqmp_dma_prep_memcpy()
/drivers/dma/sf-pdma/
sf-pdma.h:82 struct dma_async_tx_descriptor *async_tx; member
sf-pdma.c:100 desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); in sf_pdma_prep_dma_memcpy()
106 return desc->async_tx; in sf_pdma_prep_dma_memcpy()
329 dmaengine_desc_get_callback_invoke(desc->async_tx, NULL); in sf_pdma_errbh_tasklet()
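
sf-pdma is the odd one out: async_tx is a pointer, not an embedded member, because the driver sits on the virt-dma framework and the real descriptor lives inside struct virt_dma_desc. vchan_tx_prep() initializes that inner descriptor and hands back its address. A hedged sketch with a hypothetical struct my_vdesc:

#include "virt-dma.h"   /* drivers/dma virt-dma framework */

struct my_vdesc {                               /* hypothetical virt-dma descriptor */
        struct virt_dma_desc vdesc;             /* embeds the real async tx */
        struct dma_async_tx_descriptor *async_tx;       /* convenience pointer */
};

static struct dma_async_tx_descriptor *
my_vchan_prep(struct virt_dma_chan *vc, struct my_vdesc *desc,
              unsigned long flags)
{
        desc->async_tx = vchan_tx_prep(vc, &desc->vdesc, flags);
        return desc->async_tx;                  /* i.e. &desc->vdesc.tx */
}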