Lines Matching +full:0 +full:d
29 #define DMA_MAX_SIZE 0x1ffc
30 #define DMA_CYCLIC_MAX_PERIOD 0x1000
33 #define INT_STAT 0x00
34 #define INT_TC1 0x04
35 #define INT_TC2 0x08
36 #define INT_ERR1 0x0c
37 #define INT_ERR2 0x10
38 #define INT_TC1_MASK 0x18
39 #define INT_TC2_MASK 0x1c
40 #define INT_ERR1_MASK 0x20
41 #define INT_ERR2_MASK 0x24
42 #define INT_TC1_RAW 0x600
43 #define INT_TC2_RAW 0x608
44 #define INT_ERR1_RAW 0x610
45 #define INT_ERR2_RAW 0x618
46 #define CH_PRI 0x688
47 #define CH_STAT 0x690
48 #define CX_CUR_CNT 0x704
49 #define CX_LLI 0x800
50 #define CX_CNT1 0x80c
51 #define CX_CNT0 0x810
52 #define CX_SRC 0x814
53 #define CX_DST 0x818
54 #define CX_CFG 0x81c
55 #define AXI_CFG 0x820
56 #define AXI_CFG_DEFAULT 0x201201
58 #define CX_LLI_CHAIN_EN 0x2
59 #define CX_CFG_EN 0x1
61 #define CX_CFG_MEM2PER (0x1 << 2)
62 #define CX_CFG_PER2MEM (0x2 << 2)
63 #define CX_CFG_SRCINCR (0x1 << 31)
64 #define CX_CFG_DSTINCR (0x1 << 30)
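
The CX_CFG_* bits above are the pieces of the per-channel configuration word that ends up in CX_CFG. As a rough illustrative sketch (not one of the matched lines; the burst and width field positions are taken from the prep_memcpy fragment further down in this listing):

	u32 ccfg;

	/* incrementing source and destination addresses, channel enabled */
	ccfg = CX_CFG_EN | CX_CFG_SRCINCR | CX_CFG_DSTINCR;
	ccfg |= (0xf << 20) | (0xf << 24);	/* burst = 16 */
	ccfg |= (0x3 << 12) | (0x3 << 16);	/* width = 64 bit */
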
128 u32 val = 0; in k3_dma_pause_dma()
141 static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d) in k3_dma_terminate_chan() argument
143 u32 val = 0; in k3_dma_terminate_chan()
147 val = 0x1 << phy->idx; in k3_dma_terminate_chan()
148 writel_relaxed(val, d->base + INT_TC1_RAW); in k3_dma_terminate_chan()
149 writel_relaxed(val, d->base + INT_TC2_RAW); in k3_dma_terminate_chan()
150 writel_relaxed(val, d->base + INT_ERR1_RAW); in k3_dma_terminate_chan()
151 writel_relaxed(val, d->base + INT_ERR2_RAW); in k3_dma_terminate_chan()
164 static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy) in k3_dma_get_curr_cnt() argument
166 u32 cnt = 0; in k3_dma_get_curr_cnt()
168 cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10); in k3_dma_get_curr_cnt()
169 cnt &= 0xffff; in k3_dma_get_curr_cnt()
178 static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d) in k3_dma_get_chan_stat() argument
180 return readl_relaxed(d->base + CH_STAT); in k3_dma_get_chan_stat()
183 static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on) in k3_dma_enable_dma() argument
187 writel_relaxed(0x0, d->base + CH_PRI); in k3_dma_enable_dma()
190 writel_relaxed(0xffff, d->base + INT_TC1_MASK); in k3_dma_enable_dma()
191 writel_relaxed(0xffff, d->base + INT_TC2_MASK); in k3_dma_enable_dma()
192 writel_relaxed(0xffff, d->base + INT_ERR1_MASK); in k3_dma_enable_dma()
193 writel_relaxed(0xffff, d->base + INT_ERR2_MASK); in k3_dma_enable_dma()
196 writel_relaxed(0x0, d->base + INT_TC1_MASK); in k3_dma_enable_dma()
197 writel_relaxed(0x0, d->base + INT_TC2_MASK); in k3_dma_enable_dma()
198 writel_relaxed(0x0, d->base + INT_ERR1_MASK); in k3_dma_enable_dma()
199 writel_relaxed(0x0, d->base + INT_ERR2_MASK); in k3_dma_enable_dma()
205 struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id; in k3_dma_int_handler() local
208 u32 stat = readl_relaxed(d->base + INT_STAT); in k3_dma_int_handler()
209 u32 tc1 = readl_relaxed(d->base + INT_TC1); in k3_dma_int_handler()
210 u32 tc2 = readl_relaxed(d->base + INT_TC2); in k3_dma_int_handler()
211 u32 err1 = readl_relaxed(d->base + INT_ERR1); in k3_dma_int_handler()
212 u32 err2 = readl_relaxed(d->base + INT_ERR2); in k3_dma_int_handler()
213 u32 i, irq_chan = 0; in k3_dma_int_handler()
221 p = &d->phy[i]; in k3_dma_int_handler()
241 dev_warn(d->slave.dev, "DMA ERR\n"); in k3_dma_int_handler()
244 writel_relaxed(irq_chan, d->base + INT_TC1_RAW); in k3_dma_int_handler()
245 writel_relaxed(irq_chan, d->base + INT_TC2_RAW); in k3_dma_int_handler()
246 writel_relaxed(err1, d->base + INT_ERR1_RAW); in k3_dma_int_handler()
247 writel_relaxed(err2, d->base + INT_ERR2_RAW); in k3_dma_int_handler()
250 tasklet_schedule(&d->task); in k3_dma_int_handler()
260 struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device); in k3_dma_start_txd() local
266 if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d)) in k3_dma_start_txd()
285 k3_dma_set_desc(c->phy, &ds->desc_hw[0]); in k3_dma_start_txd()
286 return 0; in k3_dma_start_txd()
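
k3_dma_set_desc() itself did not match the search; given that the probe fragment below maps each channel's register window at d->base + i * 0x40, a plausible sketch of loading the first hardware descriptor into that window is shown here, with the k3_desc_hw field names (lli, count, saddr, daddr, config) assumed for illustration:

	/* sketch only: descriptor field names are assumptions, register offsets are from the listing */
	writel_relaxed(hw->lli,    phy->base + CX_LLI);
	writel_relaxed(hw->count,  phy->base + CX_CNT0);
	writel_relaxed(hw->saddr,  phy->base + CX_SRC);
	writel_relaxed(hw->daddr,  phy->base + CX_DST);
	writel_relaxed(hw->config, phy->base + CX_CFG);
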
295 struct k3_dma_dev *d = (struct k3_dma_dev *)arg; in k3_dma_tasklet() local
298 unsigned pch, pch_alloc = 0; in k3_dma_tasklet()
301 list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) { in k3_dma_tasklet()
307 dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx); in k3_dma_tasklet()
316 /* check new channel request in d->chan_pending */ in k3_dma_tasklet()
317 spin_lock_irq(&d->lock); in k3_dma_tasklet()
318 for (pch = 0; pch < d->dma_channels; pch++) { in k3_dma_tasklet()
319 p = &d->phy[pch]; in k3_dma_tasklet()
321 if (p->vchan == NULL && !list_empty(&d->chan_pending)) { in k3_dma_tasklet()
322 c = list_first_entry(&d->chan_pending, in k3_dma_tasklet()
324 /* remove from d->chan_pending */ in k3_dma_tasklet()
330 dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc); in k3_dma_tasklet()
333 spin_unlock_irq(&d->lock); in k3_dma_tasklet()
335 for (pch = 0; pch < d->dma_channels; pch++) { in k3_dma_tasklet()
337 p = &d->phy[pch]; in k3_dma_tasklet()
351 struct k3_dma_dev *d = to_k3_dma(chan->device); in k3_dma_free_chan_resources() local
354 spin_lock_irqsave(&d->lock, flags); in k3_dma_free_chan_resources()
356 spin_unlock_irqrestore(&d->lock, flags); in k3_dma_free_chan_resources()
359 c->ccfg = 0; in k3_dma_free_chan_resources()
366 struct k3_dma_dev *d = to_k3_dma(chan->device); in k3_dma_tx_status() local
371 size_t bytes = 0; in k3_dma_tx_status()
389 bytes = 0; in k3_dma_tx_status()
392 u32 clli = 0, index = 0; in k3_dma_tx_status()
394 bytes = k3_dma_get_curr_cnt(d, p); in k3_dma_tx_status()
413 struct k3_dma_dev *d = to_k3_dma(chan->device); in k3_dma_issue_pending() local
419 spin_lock(&d->lock); in k3_dma_issue_pending()
423 list_add_tail(&c->node, &d->chan_pending); in k3_dma_issue_pending()
425 tasklet_schedule(&d->task); in k3_dma_issue_pending()
426 dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc); in k3_dma_issue_pending()
429 spin_unlock(&d->lock); in k3_dma_issue_pending()
431 dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc); in k3_dma_issue_pending()
454 struct k3_dma_dev *d = to_k3_dma(chan->device); in k3_dma_alloc_desc_resource() local
458 dev_dbg(chan->device->dev, "vch %p: sg num %d exceeds max %d\n", in k3_dma_alloc_desc_resource()
467 ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli); in k3_dma_alloc_desc_resource()
483 size_t copy = 0; in k3_dma_prep_memcpy()
484 int num = 0; in k3_dma_prep_memcpy()
495 c->cyclic = 0; in k3_dma_prep_memcpy()
497 num = 0; in k3_dma_prep_memcpy()
502 c->ccfg |= (0xf << 20) | (0xf << 24); /* burst = 16 */ in k3_dma_prep_memcpy()
503 c->ccfg |= (0x3 << 12) | (0x3 << 16); /* width = 64 bit */ in k3_dma_prep_memcpy()
521 ds->desc_hw[num-1].lli = 0; /* end of link */ in k3_dma_prep_memcpy()
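
Each linked-list entry moves at most DMA_MAX_SIZE (0x1ffc, i.e. 8188) bytes, and the last entry's lli word is cleared to terminate the chain. A back-of-the-envelope sketch of the descriptor count, using the stock DIV_ROUND_UP() helper (illustrative, not a matched line):

	/* e.g. a 64 KiB memcpy needs DIV_ROUND_UP(65536, 8188) = 9 LLI entries */
	int num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
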
531 size_t len, avail, total = 0; in k3_dma_prep_slave_sg()
533 dma_addr_t addr, src = 0, dst = 0; in k3_dma_prep_slave_sg()
539 c->cyclic = 0; in k3_dma_prep_slave_sg()
550 num = 0; in k3_dma_prep_slave_sg()
575 ds->desc_hw[num-1].lli = 0; /* end of link */ in k3_dma_prep_slave_sg()
588 size_t len, avail, total = 0; in k3_dma_prep_dma_cyclic()
589 dma_addr_t addr, src = 0, dst = 0; in k3_dma_prep_dma_cyclic()
590 int num = 1, since = 0; in k3_dma_prep_dma_cyclic()
592 u32 en_tc2 = 0; in k3_dma_prep_dma_cyclic()
594 dev_dbg(chan->device->dev, "%s: buf %pad, dst %pad, buf len %zu, period_len = %zu, dir %d\n", in k3_dma_prep_dma_cyclic()
610 num = 0; in k3_dma_prep_dma_cyclic()
631 en_tc2 = 0; in k3_dma_prep_dma_cyclic()
651 u32 maxburst = 0, val = 0; in k3_dma_config()
681 if ((maxburst == 0) || (maxburst > 16)) in k3_dma_config()
691 return 0; in k3_dma_config()
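
From the client side these limits are reached through the generic dmaengine API: a peripheral driver fills in a struct dma_slave_config, and a maxburst outside 1..16 falls into the special case tested above (the matched line shows only the test, not the fallback). A minimal, hypothetical sketch; the channel pointer and FIFO address are made up:

	#include <linux/dmaengine.h>

	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_phys_addr,		/* assumed device FIFO address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,				/* within the 1..16 range tested above */
	};
	int ret = dmaengine_slave_config(chan, &cfg);	/* 'chan' obtained elsewhere */
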
698 struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device); in k3_dma_free_desc() local
700 dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli); in k3_dma_free_desc()
707 struct k3_dma_dev *d = to_k3_dma(chan->device); in k3_dma_terminate_all() local
712 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); in k3_dma_terminate_all()
715 spin_lock(&d->lock); in k3_dma_terminate_all()
717 spin_unlock(&d->lock); in k3_dma_terminate_all()
724 k3_dma_terminate_chan(p, d); in k3_dma_terminate_all()
736 return 0; in k3_dma_terminate_all()
749 struct k3_dma_dev *d = to_k3_dma(chan->device); in k3_dma_transfer_pause() local
752 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); in k3_dma_transfer_pause()
758 spin_lock(&d->lock); in k3_dma_transfer_pause()
760 spin_unlock(&d->lock); in k3_dma_transfer_pause()
764 return 0; in k3_dma_transfer_pause()
770 struct k3_dma_dev *d = to_k3_dma(chan->device); in k3_dma_transfer_resume() local
774 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); in k3_dma_transfer_resume()
781 spin_lock(&d->lock); in k3_dma_transfer_resume()
782 list_add_tail(&c->node, &d->chan_pending); in k3_dma_transfer_resume()
783 spin_unlock(&d->lock); in k3_dma_transfer_resume()
788 return 0; in k3_dma_transfer_resume()
800 struct k3_dma_dev *d = ofdma->of_dma_data; in k3_of_dma_simple_xlate() local
801 unsigned int request = dma_spec->args[0]; in k3_of_dma_simple_xlate()
803 if (request >= d->dma_requests) in k3_of_dma_simple_xlate()
806 return dma_get_slave_channel(&(d->chans[request].vc.chan)); in k3_of_dma_simple_xlate()
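
The translate callback maps the first cell of a client's dma-spec (dma_spec->args[0]) onto one of the d->dma_requests virtual channels. A consumer driver reaches it through the usual request helper; a hypothetical example, with the "rx" channel name made up:

	struct dma_chan *chan = dma_request_chan(&pdev->dev, "rx");

	if (IS_ERR(chan))
		return PTR_ERR(chan);
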
811 struct k3_dma_dev *d; in k3_dma_probe() local
814 int i, ret, irq = 0; in k3_dma_probe()
816 iores = platform_get_resource(op, IORESOURCE_MEM, 0); in k3_dma_probe()
820 d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL); in k3_dma_probe()
821 if (!d) in k3_dma_probe()
824 d->base = devm_ioremap_resource(&op->dev, iores); in k3_dma_probe()
825 if (IS_ERR(d->base)) in k3_dma_probe()
826 return PTR_ERR(d->base); in k3_dma_probe()
831 "dma-channels", &d->dma_channels); in k3_dma_probe()
833 "dma-requests", &d->dma_requests); in k3_dma_probe()
836 d->clk = devm_clk_get(&op->dev, NULL); in k3_dma_probe()
837 if (IS_ERR(d->clk)) { in k3_dma_probe()
839 return PTR_ERR(d->clk); in k3_dma_probe()
842 irq = platform_get_irq(op, 0); in k3_dma_probe()
844 k3_dma_int_handler, 0, DRIVER_NAME, d); in k3_dma_probe()
848 d->irq = irq; in k3_dma_probe()
851 d->pool = dmam_pool_create(DRIVER_NAME, &op->dev, in k3_dma_probe()
852 LLI_BLOCK_SIZE, 32, 0); in k3_dma_probe()
853 if (!d->pool) in k3_dma_probe()
857 d->phy = devm_kcalloc(&op->dev, in k3_dma_probe()
858 d->dma_channels, sizeof(struct k3_dma_phy), GFP_KERNEL); in k3_dma_probe()
859 if (d->phy == NULL) in k3_dma_probe()
862 for (i = 0; i < d->dma_channels; i++) { in k3_dma_probe()
863 struct k3_dma_phy *p = &d->phy[i]; in k3_dma_probe()
866 p->base = d->base + i * 0x40; in k3_dma_probe()
869 INIT_LIST_HEAD(&d->slave.channels); in k3_dma_probe()
870 dma_cap_set(DMA_SLAVE, d->slave.cap_mask); in k3_dma_probe()
871 dma_cap_set(DMA_MEMCPY, d->slave.cap_mask); in k3_dma_probe()
872 dma_cap_set(DMA_CYCLIC, d->slave.cap_mask); in k3_dma_probe()
873 d->slave.dev = &op->dev; in k3_dma_probe()
874 d->slave.device_free_chan_resources = k3_dma_free_chan_resources; in k3_dma_probe()
875 d->slave.device_tx_status = k3_dma_tx_status; in k3_dma_probe()
876 d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy; in k3_dma_probe()
877 d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg; in k3_dma_probe()
878 d->slave.device_prep_dma_cyclic = k3_dma_prep_dma_cyclic; in k3_dma_probe()
879 d->slave.device_issue_pending = k3_dma_issue_pending; in k3_dma_probe()
880 d->slave.device_config = k3_dma_config; in k3_dma_probe()
881 d->slave.device_pause = k3_dma_transfer_pause; in k3_dma_probe()
882 d->slave.device_resume = k3_dma_transfer_resume; in k3_dma_probe()
883 d->slave.device_terminate_all = k3_dma_terminate_all; in k3_dma_probe()
884 d->slave.device_synchronize = k3_dma_synchronize; in k3_dma_probe()
885 d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES; in k3_dma_probe()
888 d->chans = devm_kcalloc(&op->dev, in k3_dma_probe()
889 d->dma_requests, sizeof(struct k3_dma_chan), GFP_KERNEL); in k3_dma_probe()
890 if (d->chans == NULL) in k3_dma_probe()
893 for (i = 0; i < d->dma_requests; i++) { in k3_dma_probe()
894 struct k3_dma_chan *c = &d->chans[i]; in k3_dma_probe()
899 vchan_init(&c->vc, &d->slave); in k3_dma_probe()
903 ret = clk_prepare_enable(d->clk); in k3_dma_probe()
904 if (ret < 0) { in k3_dma_probe()
905 dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret); in k3_dma_probe()
909 k3_dma_enable_dma(d, true); in k3_dma_probe()
911 ret = dma_async_device_register(&d->slave); in k3_dma_probe()
916 k3_of_dma_simple_xlate, d); in k3_dma_probe()
920 spin_lock_init(&d->lock); in k3_dma_probe()
921 INIT_LIST_HEAD(&d->chan_pending); in k3_dma_probe()
922 tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d); in k3_dma_probe()
923 platform_set_drvdata(op, d); in k3_dma_probe()
926 return 0; in k3_dma_probe()
929 dma_async_device_unregister(&d->slave); in k3_dma_probe()
931 clk_disable_unprepare(d->clk); in k3_dma_probe()
938 struct k3_dma_dev *d = platform_get_drvdata(op); in k3_dma_remove() local
940 dma_async_device_unregister(&d->slave); in k3_dma_remove()
943 devm_free_irq(&op->dev, d->irq, d); in k3_dma_remove()
945 list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) { in k3_dma_remove()
949 tasklet_kill(&d->task); in k3_dma_remove()
950 clk_disable_unprepare(d->clk); in k3_dma_remove()
951 return 0; in k3_dma_remove()
957 struct k3_dma_dev *d = dev_get_drvdata(dev); in k3_dma_suspend_dev() local
958 u32 stat = 0; in k3_dma_suspend_dev()
960 stat = k3_dma_get_chan_stat(d); in k3_dma_suspend_dev()
962 dev_warn(d->slave.dev, in k3_dma_suspend_dev()
963 "chan %d is running fail to suspend\n", stat); in k3_dma_suspend_dev()
966 k3_dma_enable_dma(d, false); in k3_dma_suspend_dev()
967 clk_disable_unprepare(d->clk); in k3_dma_suspend_dev()
968 return 0; in k3_dma_suspend_dev()
973 struct k3_dma_dev *d = dev_get_drvdata(dev); in k3_dma_resume_dev() local
974 int ret = 0; in k3_dma_resume_dev()
976 ret = clk_prepare_enable(d->clk); in k3_dma_resume_dev()
977 if (ret < 0) { in k3_dma_resume_dev()
978 dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret); in k3_dma_resume_dev()
981 k3_dma_enable_dma(d, true); in k3_dma_resume_dev()
982 return 0; in k3_dma_resume_dev()
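
k3_dma_suspend_dev() and k3_dma_resume_dev() only take effect once they are wired into the driver's dev_pm_ops; the matched lines stop short of that, but a plausible wiring with the stock SIMPLE_DEV_PM_OPS() helper (the of_match_table symbol name is assumed) would be:

	static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);

	static struct platform_driver k3_pdma_driver = {
		.driver		= {
			.name		= DRIVER_NAME,
			.pm		= &k3_dma_pmops,
			.of_match_table	= k3_pdma_dt_ids,	/* assumed symbol name */
		},
		.probe		= k3_dma_probe,
		.remove		= k3_dma_remove,
	};

	module_platform_driver(k3_pdma_driver);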