Lines Matching +full:dma +full:- +full:maxburst

11 #include <linux/dma-mapping.h>
25 #include "virt-dma.h"
27 #define DRIVER_NAME "zx-dma"
29 #define DMA_MAX_SIZE (0x10000 - 512)
145 val = readl_relaxed(phy->base + REG_ZX_CTRL); in zx_dma_terminate_chan()
148 writel_relaxed(val, phy->base + REG_ZX_CTRL); in zx_dma_terminate_chan()
150 val = 0x1 << phy->idx; in zx_dma_terminate_chan()
151 writel_relaxed(val, d->base + REG_ZX_TC_IRQ_RAW); in zx_dma_terminate_chan()
152 writel_relaxed(val, d->base + REG_ZX_SRC_ERR_IRQ_RAW); in zx_dma_terminate_chan()
153 writel_relaxed(val, d->base + REG_ZX_DST_ERR_IRQ_RAW); in zx_dma_terminate_chan()
154 writel_relaxed(val, d->base + REG_ZX_CFG_ERR_IRQ_RAW); in zx_dma_terminate_chan()
159 writel_relaxed(hw->saddr, phy->base + REG_ZX_SRC_ADDR); in zx_dma_set_desc()
160 writel_relaxed(hw->daddr, phy->base + REG_ZX_DST_ADDR); in zx_dma_set_desc()
161 writel_relaxed(hw->src_x, phy->base + REG_ZX_TX_X_COUNT); in zx_dma_set_desc()
162 writel_relaxed(0, phy->base + REG_ZX_TX_ZY_COUNT); in zx_dma_set_desc()
163 writel_relaxed(0, phy->base + REG_ZX_SRC_ZY_STEP); in zx_dma_set_desc()
164 writel_relaxed(0, phy->base + REG_ZX_DST_ZY_STEP); in zx_dma_set_desc()
165 writel_relaxed(hw->lli, phy->base + REG_ZX_LLI_ADDR); in zx_dma_set_desc()
166 writel_relaxed(hw->ctr, phy->base + REG_ZX_CTRL); in zx_dma_set_desc()
171 return readl_relaxed(phy->base + REG_ZX_LLI_ADDR); in zx_dma_get_curr_lli()
176 return readl_relaxed(d->base + REG_ZX_STATUS); in zx_dma_get_chan_stat()
182 writel_relaxed(0x0, d->base + REG_ZX_DMA_ARB); in zx_dma_init_state()
184 writel_relaxed(0xffffffff, d->base + REG_ZX_TC_IRQ_RAW); in zx_dma_init_state()
185 writel_relaxed(0xffffffff, d->base + REG_ZX_SRC_ERR_IRQ_RAW); in zx_dma_init_state()
186 writel_relaxed(0xffffffff, d->base + REG_ZX_DST_ERR_IRQ_RAW); in zx_dma_init_state()
187 writel_relaxed(0xffffffff, d->base + REG_ZX_CFG_ERR_IRQ_RAW); in zx_dma_init_state()
192 struct zx_dma_dev *d = to_zx_dma(c->vc.chan.device); in zx_dma_start_txd()
193 struct virt_dma_desc *vd = vchan_next_desc(&c->vc); in zx_dma_start_txd()
195 if (!c->phy) in zx_dma_start_txd()
196 return -EAGAIN; in zx_dma_start_txd()
198 if (BIT(c->phy->idx) & zx_dma_get_chan_stat(d)) in zx_dma_start_txd()
199 return -EAGAIN; in zx_dma_start_txd()
205 * fetch and remove request from vc->desc_issued in zx_dma_start_txd()
206 * so vc->desc_issued only contains desc pending in zx_dma_start_txd()
208 list_del(&ds->vd.node); in zx_dma_start_txd()
209 c->phy->ds_run = ds; in zx_dma_start_txd()
210 c->phy->ds_done = NULL; in zx_dma_start_txd()
211 /* start dma */ in zx_dma_start_txd()
212 zx_dma_set_desc(c->phy, ds->desc_hw); in zx_dma_start_txd()
215 c->phy->ds_done = NULL; in zx_dma_start_txd()
216 c->phy->ds_run = NULL; in zx_dma_start_txd()
217 return -EAGAIN; in zx_dma_start_txd()
227 /* check new dma request of running channel in vc->desc_issued */ in zx_dma_task()
228 list_for_each_entry_safe(c, cn, &d->slave.channels, in zx_dma_task()
230 spin_lock_irqsave(&c->vc.lock, flags); in zx_dma_task()
231 p = c->phy; in zx_dma_task()
232 if (p && p->ds_done && zx_dma_start_txd(c)) { in zx_dma_task()
234 dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx); in zx_dma_task()
236 c->phy = NULL; in zx_dma_task()
237 p->vchan = NULL; in zx_dma_task()
239 spin_unlock_irqrestore(&c->vc.lock, flags); in zx_dma_task()
242 /* check new channel request in d->chan_pending */ in zx_dma_task()
243 spin_lock_irqsave(&d->lock, flags); in zx_dma_task()
244 while (!list_empty(&d->chan_pending)) { in zx_dma_task()
245 c = list_first_entry(&d->chan_pending, in zx_dma_task()
247 p = &d->phy[c->id]; in zx_dma_task()
248 if (!p->vchan) { in zx_dma_task()
249 /* remove from d->chan_pending */ in zx_dma_task()
250 list_del_init(&c->node); in zx_dma_task()
251 pch_alloc |= 1 << c->id; in zx_dma_task()
253 p->vchan = c; in zx_dma_task()
254 c->phy = p; in zx_dma_task()
256 dev_dbg(d->slave.dev, "pchan %u: busy!\n", c->id); in zx_dma_task()
259 spin_unlock_irqrestore(&d->lock, flags); in zx_dma_task()
261 for (pch = 0; pch < d->dma_channels; pch++) { in zx_dma_task()
263 p = &d->phy[pch]; in zx_dma_task()
264 c = p->vchan; in zx_dma_task()
266 spin_lock_irqsave(&c->vc.lock, flags); in zx_dma_task()
268 spin_unlock_irqrestore(&c->vc.lock, flags); in zx_dma_task()
279 u32 tc = readl_relaxed(d->base + REG_ZX_TC_IRQ); in zx_dma_int_handler()
280 u32 serr = readl_relaxed(d->base + REG_ZX_SRC_ERR_IRQ); in zx_dma_int_handler()
281 u32 derr = readl_relaxed(d->base + REG_ZX_DST_ERR_IRQ); in zx_dma_int_handler()
282 u32 cfg = readl_relaxed(d->base + REG_ZX_CFG_ERR_IRQ); in zx_dma_int_handler()
288 p = &d->phy[i]; in zx_dma_int_handler()
289 c = p->vchan; in zx_dma_int_handler()
293 spin_lock_irqsave(&c->vc.lock, flags); in zx_dma_int_handler()
294 if (c->cyclic) { in zx_dma_int_handler()
295 vchan_cyclic_callback(&p->ds_run->vd); in zx_dma_int_handler()
297 vchan_cookie_complete(&p->ds_run->vd); in zx_dma_int_handler()
298 p->ds_done = p->ds_run; in zx_dma_int_handler()
301 spin_unlock_irqrestore(&c->vc.lock, flags); in zx_dma_int_handler()
307 dev_warn(d->slave.dev, "DMA ERR src 0x%x, dst 0x%x, cfg 0x%x\n", in zx_dma_int_handler()
310 writel_relaxed(irq_chan, d->base + REG_ZX_TC_IRQ_RAW); in zx_dma_int_handler()
311 writel_relaxed(serr, d->base + REG_ZX_SRC_ERR_IRQ_RAW); in zx_dma_int_handler()
312 writel_relaxed(derr, d->base + REG_ZX_DST_ERR_IRQ_RAW); in zx_dma_int_handler()
313 writel_relaxed(cfg, d->base + REG_ZX_CFG_ERR_IRQ_RAW); in zx_dma_int_handler()
323 struct zx_dma_dev *d = to_zx_dma(chan->device); in zx_dma_free_chan_resources()
326 spin_lock_irqsave(&d->lock, flags); in zx_dma_free_chan_resources()
327 list_del_init(&c->node); in zx_dma_free_chan_resources()
328 spin_unlock_irqrestore(&d->lock, flags); in zx_dma_free_chan_resources()
330 vchan_free_chan_resources(&c->vc); in zx_dma_free_chan_resources()
331 c->ccfg = 0; in zx_dma_free_chan_resources()
345 ret = dma_cookie_status(&c->vc.chan, cookie, state); in zx_dma_tx_status()
349 spin_lock_irqsave(&c->vc.lock, flags); in zx_dma_tx_status()
350 p = c->phy; in zx_dma_tx_status()
351 ret = c->status; in zx_dma_tx_status()
357 vd = vchan_find_desc(&c->vc, cookie); in zx_dma_tx_status()
359 bytes = container_of(vd, struct zx_dma_desc_sw, vd)->size; in zx_dma_tx_status()
360 } else if ((!p) || (!p->ds_run)) { in zx_dma_tx_status()
363 struct zx_dma_desc_sw *ds = p->ds_run; in zx_dma_tx_status()
368 index = (clli - ds->desc_hw_lli) / in zx_dma_tx_status()
370 for (; index < ds->desc_num; index++) { in zx_dma_tx_status()
371 bytes += ds->desc_hw[index].src_x; in zx_dma_tx_status()
373 if (!ds->desc_hw[index].lli) in zx_dma_tx_status()
377 spin_unlock_irqrestore(&c->vc.lock, flags); in zx_dma_tx_status()
385 struct zx_dma_dev *d = to_zx_dma(chan->device); in zx_dma_issue_pending()
389 spin_lock_irqsave(&c->vc.lock, flags); in zx_dma_issue_pending()
390 /* add request to vc->desc_issued */ in zx_dma_issue_pending()
391 if (vchan_issue_pending(&c->vc)) { in zx_dma_issue_pending()
392 spin_lock(&d->lock); in zx_dma_issue_pending()
393 if (!c->phy && list_empty(&c->node)) { in zx_dma_issue_pending()
395 list_add_tail(&c->node, &d->chan_pending); in zx_dma_issue_pending()
397 dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc); in zx_dma_issue_pending()
399 spin_unlock(&d->lock); in zx_dma_issue_pending()
401 dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc); in zx_dma_issue_pending()
403 spin_unlock_irqrestore(&c->vc.lock, flags); in zx_dma_issue_pending()
412 if ((num + 1) < ds->desc_num) in zx_dma_fill_desc()
413 ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) * in zx_dma_fill_desc()
415 ds->desc_hw[num].saddr = src; in zx_dma_fill_desc()
416 ds->desc_hw[num].daddr = dst; in zx_dma_fill_desc()
417 ds->desc_hw[num].src_x = len; in zx_dma_fill_desc()
418 ds->desc_hw[num].ctr = ccfg; in zx_dma_fill_desc()
426 struct zx_dma_dev *d = to_zx_dma(chan->device); in zx_alloc_desc_resource()
430 dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n", in zx_alloc_desc_resource()
431 &c->vc, num, lli_limit); in zx_alloc_desc_resource()
439 ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli); in zx_alloc_desc_resource()
440 if (!ds->desc_hw) { in zx_alloc_desc_resource()
441 dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc); in zx_alloc_desc_resource()
445 ds->desc_num = num; in zx_alloc_desc_resource()
456 return ffs(width) - 1; in zx_dma_burst_width()
464 struct dma_slave_config *cfg = &c->slave_cfg; in zx_pre_config()
467 u32 maxburst = 0; in zx_pre_config() local
471 c->ccfg = ZX_CH_ENABLE | ZX_SOFT_REQ in zx_pre_config()
472 | ZX_SRC_BURST_LEN(ZX_MAX_BURST_LEN - 1) in zx_pre_config()
477 c->dev_addr = cfg->dst_addr; in zx_pre_config()
483 dst_width = zx_dma_burst_width(cfg->dst_addr_width); in zx_pre_config()
484 maxburst = cfg->dst_maxburst; in zx_pre_config()
485 maxburst = maxburst < ZX_MAX_BURST_LEN ? in zx_pre_config()
486 maxburst : ZX_MAX_BURST_LEN; in zx_pre_config()
487 c->ccfg = ZX_DST_FIFO_MODE | ZX_CH_ENABLE in zx_pre_config()
488 | ZX_SRC_BURST_LEN(maxburst - 1) in zx_pre_config()
493 c->dev_addr = cfg->src_addr; in zx_pre_config()
494 src_width = zx_dma_burst_width(cfg->src_addr_width); in zx_pre_config()
495 maxburst = cfg->src_maxburst; in zx_pre_config()
496 maxburst = maxburst < ZX_MAX_BURST_LEN ? in zx_pre_config()
497 maxburst : ZX_MAX_BURST_LEN; in zx_pre_config()
498 c->ccfg = ZX_SRC_FIFO_MODE | ZX_CH_ENABLE in zx_pre_config()
499 | ZX_SRC_BURST_LEN(maxburst - 1) in zx_pre_config()
504 return -EINVAL; in zx_pre_config()
530 ds->size = len; in zx_dma_prep_memcpy()
535 zx_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg); in zx_dma_prep_memcpy()
539 len -= copy; in zx_dma_prep_memcpy()
542 c->cyclic = 0; in zx_dma_prep_memcpy()
543 ds->desc_hw[num - 1].lli = 0; /* end of link */ in zx_dma_prep_memcpy()
544 ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL; in zx_dma_prep_memcpy()
545 return vchan_tx_prep(&c->vc, &ds->vd, flags); in zx_dma_prep_memcpy()
568 num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1; in zx_dma_prep_slave_sg()
575 c->cyclic = 0; in zx_dma_prep_slave_sg()
587 dst = c->dev_addr; in zx_dma_prep_slave_sg()
589 src = c->dev_addr; in zx_dma_prep_slave_sg()
593 zx_dma_fill_desc(ds, dst, src, len, num++, c->ccfg); in zx_dma_prep_slave_sg()
596 avail -= len; in zx_dma_prep_slave_sg()
600 ds->desc_hw[num - 1].lli = 0; /* end of link */ in zx_dma_prep_slave_sg()
601 ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL; in zx_dma_prep_slave_sg()
602 ds->size = total; in zx_dma_prep_slave_sg()
603 return vchan_tx_prep(&c->vc, &ds->vd, flags); in zx_dma_prep_slave_sg()
618 dev_err(chan->device->dev, "maximum period size exceeded\n"); in zx_dma_prep_dma_cyclic()
628 c->cyclic = 1; in zx_dma_prep_dma_cyclic()
633 dst = c->dev_addr; in zx_dma_prep_dma_cyclic()
635 src = c->dev_addr; in zx_dma_prep_dma_cyclic()
639 c->ccfg | ZX_IRQ_ENABLE_ALL); in zx_dma_prep_dma_cyclic()
644 ds->desc_hw[num - 1].lli = ds->desc_hw_lli; in zx_dma_prep_dma_cyclic()
645 ds->size = buf_len; in zx_dma_prep_dma_cyclic()
646 return vchan_tx_prep(&c->vc, &ds->vd, flags); in zx_dma_prep_dma_cyclic()
655 return -EINVAL; in zx_dma_config()
657 memcpy(&c->slave_cfg, cfg, sizeof(*cfg)); in zx_dma_config()
665 struct zx_dma_dev *d = to_zx_dma(chan->device); in zx_dma_terminate_all()
666 struct zx_dma_phy *p = c->phy; in zx_dma_terminate_all()
670 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); in zx_dma_terminate_all()
673 spin_lock(&d->lock); in zx_dma_terminate_all()
674 list_del_init(&c->node); in zx_dma_terminate_all()
675 spin_unlock(&d->lock); in zx_dma_terminate_all()
678 spin_lock_irqsave(&c->vc.lock, flags); in zx_dma_terminate_all()
679 vchan_get_all_descriptors(&c->vc, &head); in zx_dma_terminate_all()
681 /* vchan is assigned to a pchan - stop the channel */ in zx_dma_terminate_all()
683 c->phy = NULL; in zx_dma_terminate_all()
684 p->vchan = NULL; in zx_dma_terminate_all()
685 p->ds_run = NULL; in zx_dma_terminate_all()
686 p->ds_done = NULL; in zx_dma_terminate_all()
688 spin_unlock_irqrestore(&c->vc.lock, flags); in zx_dma_terminate_all()
689 vchan_dma_desc_free_list(&c->vc, &head); in zx_dma_terminate_all()
699 val = readl_relaxed(c->phy->base + REG_ZX_CTRL); in zx_dma_transfer_pause()
701 writel_relaxed(val, c->phy->base + REG_ZX_CTRL); in zx_dma_transfer_pause()
711 val = readl_relaxed(c->phy->base + REG_ZX_CTRL); in zx_dma_transfer_resume()
713 writel_relaxed(val, c->phy->base + REG_ZX_CTRL); in zx_dma_transfer_resume()
722 struct zx_dma_dev *d = to_zx_dma(vd->tx.chan->device); in zx_dma_free_desc()
724 dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli); in zx_dma_free_desc()
729 { .compatible = "zte,zx296702-dma", },
737 struct zx_dma_dev *d = ofdma->of_dma_data; in zx_of_dma_simple_xlate()
738 unsigned int request = dma_spec->args[0]; in zx_of_dma_simple_xlate()
742 if (request >= d->dma_requests) in zx_of_dma_simple_xlate()
745 chan = dma_get_any_slave_channel(&d->slave); in zx_of_dma_simple_xlate()
747 dev_err(d->slave.dev, "get channel fail in %s.\n", __func__); in zx_of_dma_simple_xlate()
751 c->id = request; in zx_of_dma_simple_xlate()
752 dev_info(d->slave.dev, "zx_dma: pchan %u: alloc vchan %p\n", in zx_of_dma_simple_xlate()
753 c->id, &c->vc); in zx_of_dma_simple_xlate()
765 return -EINVAL; in zx_dma_probe()
767 d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL); in zx_dma_probe()
769 return -ENOMEM; in zx_dma_probe()
771 d->base = devm_ioremap_resource(&op->dev, iores); in zx_dma_probe()
772 if (IS_ERR(d->base)) in zx_dma_probe()
773 return PTR_ERR(d->base); in zx_dma_probe()
775 of_property_read_u32((&op->dev)->of_node, in zx_dma_probe()
776 "dma-channels", &d->dma_channels); in zx_dma_probe()
777 of_property_read_u32((&op->dev)->of_node, in zx_dma_probe()
778 "dma-requests", &d->dma_requests); in zx_dma_probe()
779 if (!d->dma_requests || !d->dma_channels) in zx_dma_probe()
780 return -EINVAL; in zx_dma_probe()
782 d->clk = devm_clk_get(&op->dev, NULL); in zx_dma_probe()
783 if (IS_ERR(d->clk)) { in zx_dma_probe()
784 dev_err(&op->dev, "no dma clk\n"); in zx_dma_probe()
785 return PTR_ERR(d->clk); in zx_dma_probe()
788 d->irq = platform_get_irq(op, 0); in zx_dma_probe()
789 ret = devm_request_irq(&op->dev, d->irq, zx_dma_int_handler, in zx_dma_probe()
794 /* A DMA memory pool for LLIs, align on 32-byte boundary */ in zx_dma_probe()
795 d->pool = dmam_pool_create(DRIVER_NAME, &op->dev, in zx_dma_probe()
797 if (!d->pool) in zx_dma_probe()
798 return -ENOMEM; in zx_dma_probe()
801 d->phy = devm_kcalloc(&op->dev, in zx_dma_probe()
802 d->dma_channels, sizeof(struct zx_dma_phy), GFP_KERNEL); in zx_dma_probe()
803 if (!d->phy) in zx_dma_probe()
804 return -ENOMEM; in zx_dma_probe()
806 for (i = 0; i < d->dma_channels; i++) { in zx_dma_probe()
807 struct zx_dma_phy *p = &d->phy[i]; in zx_dma_probe()
809 p->idx = i; in zx_dma_probe()
810 p->base = d->base + i * 0x40; in zx_dma_probe()
813 INIT_LIST_HEAD(&d->slave.channels); in zx_dma_probe()
814 dma_cap_set(DMA_SLAVE, d->slave.cap_mask); in zx_dma_probe()
815 dma_cap_set(DMA_MEMCPY, d->slave.cap_mask); in zx_dma_probe()
816 dma_cap_set(DMA_CYCLIC, d->slave.cap_mask); in zx_dma_probe()
817 dma_cap_set(DMA_PRIVATE, d->slave.cap_mask); in zx_dma_probe()
818 d->slave.dev = &op->dev; in zx_dma_probe()
819 d->slave.device_free_chan_resources = zx_dma_free_chan_resources; in zx_dma_probe()
820 d->slave.device_tx_status = zx_dma_tx_status; in zx_dma_probe()
821 d->slave.device_prep_dma_memcpy = zx_dma_prep_memcpy; in zx_dma_probe()
822 d->slave.device_prep_slave_sg = zx_dma_prep_slave_sg; in zx_dma_probe()
823 d->slave.device_prep_dma_cyclic = zx_dma_prep_dma_cyclic; in zx_dma_probe()
824 d->slave.device_issue_pending = zx_dma_issue_pending; in zx_dma_probe()
825 d->slave.device_config = zx_dma_config; in zx_dma_probe()
826 d->slave.device_terminate_all = zx_dma_terminate_all; in zx_dma_probe()
827 d->slave.device_pause = zx_dma_transfer_pause; in zx_dma_probe()
828 d->slave.device_resume = zx_dma_transfer_resume; in zx_dma_probe()
829 d->slave.copy_align = DMA_ALIGN; in zx_dma_probe()
830 d->slave.src_addr_widths = ZX_DMA_BUSWIDTHS; in zx_dma_probe()
831 d->slave.dst_addr_widths = ZX_DMA_BUSWIDTHS; in zx_dma_probe()
832 d->slave.directions = BIT(DMA_MEM_TO_MEM) | BIT(DMA_MEM_TO_DEV) in zx_dma_probe()
834 d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; in zx_dma_probe()
837 d->chans = devm_kcalloc(&op->dev, in zx_dma_probe()
838 d->dma_requests, sizeof(struct zx_dma_chan), GFP_KERNEL); in zx_dma_probe()
839 if (!d->chans) in zx_dma_probe()
840 return -ENOMEM; in zx_dma_probe()
842 for (i = 0; i < d->dma_requests; i++) { in zx_dma_probe()
843 struct zx_dma_chan *c = &d->chans[i]; in zx_dma_probe()
845 c->status = DMA_IN_PROGRESS; in zx_dma_probe()
846 INIT_LIST_HEAD(&c->node); in zx_dma_probe()
847 c->vc.desc_free = zx_dma_free_desc; in zx_dma_probe()
848 vchan_init(&c->vc, &d->slave); in zx_dma_probe()
852 ret = clk_prepare_enable(d->clk); in zx_dma_probe()
854 dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret); in zx_dma_probe()
860 spin_lock_init(&d->lock); in zx_dma_probe()
861 INIT_LIST_HEAD(&d->chan_pending); in zx_dma_probe()
864 ret = dma_async_device_register(&d->slave); in zx_dma_probe()
868 ret = of_dma_controller_register((&op->dev)->of_node, in zx_dma_probe()
873 dev_info(&op->dev, "initialized\n"); in zx_dma_probe()
877 dma_async_device_unregister(&d->slave); in zx_dma_probe()
879 clk_disable_unprepare(d->clk); in zx_dma_probe()
890 devm_free_irq(&op->dev, d->irq, d); in zx_dma_remove()
892 dma_async_device_unregister(&d->slave); in zx_dma_remove()
893 of_dma_controller_free((&op->dev)->of_node); in zx_dma_remove()
895 list_for_each_entry_safe(c, cn, &d->slave.channels, in zx_dma_remove()
897 list_del(&c->vc.chan.device_node); in zx_dma_remove()
899 clk_disable_unprepare(d->clk); in zx_dma_remove()
900 dmam_pool_destroy(d->pool); in zx_dma_remove()
913 dev_warn(d->slave.dev, in zx_dma_suspend_dev()
915 return -1; in zx_dma_suspend_dev()
917 clk_disable_unprepare(d->clk); in zx_dma_suspend_dev()
926 ret = clk_prepare_enable(d->clk); in zx_dma_resume_dev()
928 dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret); in zx_dma_resume_dev()
950 MODULE_DESCRIPTION("ZTE ZX296702 DMA Driver");