Lines Matching +full:chan +full:- +full:name

1 // SPDX-License-Identifier: GPL-2.0-only
9 #include <linux/dma-mapping.h>
19 #define DRV_NAME "pch-dma"
29 #define DMA_CTL2_IRQ_ENABLE_MASK ((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)
91 struct dma_chan chan; member
110 #define channel_readl(pdc, name) \ argument
111 readl((pdc)->membase + PDC_##name)
112 #define channel_writel(pdc, name, val) \ argument
113 writel((val), (pdc)->membase + PDC_##name)
132 #define dma_readl(pd, name) \ argument
133 readl((pd)->membase + PCH_DMA_##name)
134 #define dma_writel(pd, name, val) \ argument
135 writel((val), (pd)->membase + PCH_DMA_##name)
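
The ## token-pasting in these accessors is easy to miss when only the matching lines are shown: the second argument is glued onto a register-offset prefix. A minimal illustration of how two of the calls used later in this file expand, assuming the PDC_* and PCH_DMA_* offset macros defined in the non-matching parts of pch_dma.c:

	/* Illustration only -- not part of pch_dma.c.  PDC_DEV_ADDR and
	 * PCH_DMA_CTL0 are register offsets defined elsewhere in the file. */
	channel_writel(pd_chan, DEV_ADDR, addr);
	/* expands to: writel((addr), (pd_chan)->membase + PDC_DEV_ADDR); */

	val = dma_readl(pd, CTL0);
	/* expands to: val = readl((pd)->membase + PCH_DMA_CTL0); */
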
143 static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan) in to_pd_chan() argument
145 return container_of(chan, struct pch_dma_chan, chan); in to_pd_chan()
153 static inline struct device *chan2dev(struct dma_chan *chan) in chan2dev() argument
155 return &chan->dev->device; in chan2dev()
158 static inline struct device *chan2parent(struct dma_chan *chan) in chan2parent() argument
160 return chan->dev->device.parent; in chan2parent()
166 return list_first_entry(&pd_chan->active_list, in pdc_first_active()
173 return list_first_entry(&pd_chan->queue, in pdc_first_queued()
177 static void pdc_enable_irq(struct dma_chan *chan, int enable) in pdc_enable_irq() argument
179 struct pch_dma *pd = to_pd(chan->device); in pdc_enable_irq()
183 if (chan->chan_id < 8) in pdc_enable_irq()
184 pos = chan->chan_id; in pdc_enable_irq()
186 pos = chan->chan_id + 8; in pdc_enable_irq()
197 dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n", in pdc_enable_irq()
198 chan->chan_id, val); in pdc_enable_irq()
201 static void pdc_set_dir(struct dma_chan *chan) in pdc_set_dir() argument
203 struct pch_dma_chan *pd_chan = to_pd_chan(chan); in pdc_set_dir()
204 struct pch_dma *pd = to_pd(chan->device); in pdc_set_dir()
209 if (chan->chan_id < 8) { in pdc_set_dir()
213 (DMA_CTL0_BITS_PER_CH * chan->chan_id); in pdc_set_dir()
215 (DMA_CTL0_BITS_PER_CH * chan->chan_id)); in pdc_set_dir()
217 if (pd_chan->dir == DMA_MEM_TO_DEV) in pdc_set_dir()
218 val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + in pdc_set_dir()
221 val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + in pdc_set_dir()
227 int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ in pdc_set_dir()
235 if (pd_chan->dir == DMA_MEM_TO_DEV) in pdc_set_dir()
245 dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n", in pdc_set_dir()
246 chan->chan_id, val); in pdc_set_dir()
249 static void pdc_set_mode(struct dma_chan *chan, u32 mode) in pdc_set_mode() argument
251 struct pch_dma *pd = to_pd(chan->device); in pdc_set_mode()
256 if (chan->chan_id < 8) { in pdc_set_mode()
258 (DMA_CTL0_BITS_PER_CH * chan->chan_id)); in pdc_set_mode()
259 mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +\ in pdc_set_mode()
263 val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id); in pdc_set_mode()
267 int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ in pdc_set_mode()
279 dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n", in pdc_set_mode()
280 chan->chan_id, val); in pdc_set_mode()
285 struct pch_dma *pd = to_pd(pd_chan->chan.device); in pdc_get_status0()
290 DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id)); in pdc_get_status0()
295 struct pch_dma *pd = to_pd(pd_chan->chan.device); in pdc_get_status2()
300 DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8))); in pdc_get_status2()
307 if (pd_chan->chan.chan_id < 8) in pdc_is_idle()
322 dev_err(chan2dev(&pd_chan->chan), in pdc_dostart()
323 "BUG: Attempt to start non-idle channel\n"); in pdc_dostart()
327 dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n", in pdc_dostart()
328 pd_chan->chan.chan_id, desc->regs.dev_addr); in pdc_dostart()
329 dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n", in pdc_dostart()
330 pd_chan->chan.chan_id, desc->regs.mem_addr); in pdc_dostart()
331 dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n", in pdc_dostart()
332 pd_chan->chan.chan_id, desc->regs.size); in pdc_dostart()
333 dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n", in pdc_dostart()
334 pd_chan->chan.chan_id, desc->regs.next); in pdc_dostart()
336 if (list_empty(&desc->tx_list)) { in pdc_dostart()
337 channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr); in pdc_dostart()
338 channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr); in pdc_dostart()
339 channel_writel(pd_chan, SIZE, desc->regs.size); in pdc_dostart()
340 channel_writel(pd_chan, NEXT, desc->regs.next); in pdc_dostart()
341 pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT); in pdc_dostart()
343 channel_writel(pd_chan, NEXT, desc->txd.phys); in pdc_dostart()
344 pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG); in pdc_dostart()
351 struct dma_async_tx_descriptor *txd = &desc->txd; in pdc_chain_complete()
355 list_splice_init(&desc->tx_list, &pd_chan->free_list); in pdc_chain_complete()
356 list_move(&desc->desc_node, &pd_chan->free_list); in pdc_chain_complete()
368 if (!list_empty(&pd_chan->queue)) in pdc_complete_all()
371 list_splice_init(&pd_chan->active_list, &list); in pdc_complete_all()
372 list_splice_init(&pd_chan->queue, &pd_chan->active_list); in pdc_complete_all()
383 list_del(&bad_desc->desc_node); in pdc_handle_error()
385 list_splice_init(&pd_chan->queue, pd_chan->active_list.prev); in pdc_handle_error()
387 if (!list_empty(&pd_chan->active_list)) in pdc_handle_error()
390 dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n"); in pdc_handle_error()
391 dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n", in pdc_handle_error()
392 bad_desc->txd.cookie); in pdc_handle_error()
399 if (list_empty(&pd_chan->active_list) || in pdc_advance_work()
400 list_is_singular(&pd_chan->active_list)) { in pdc_advance_work()
411 struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan); in pd_tx_submit()
413 spin_lock(&pd_chan->lock); in pd_tx_submit()
415 if (list_empty(&pd_chan->active_list)) { in pd_tx_submit()
416 list_add_tail(&desc->desc_node, &pd_chan->active_list); in pd_tx_submit()
419 list_add_tail(&desc->desc_node, &pd_chan->queue); in pd_tx_submit()
422 spin_unlock(&pd_chan->lock); in pd_tx_submit()
426 static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags) in pdc_alloc_desc() argument
429 struct pch_dma *pd = to_pd(chan->device); in pdc_alloc_desc()
432 desc = dma_pool_zalloc(pd->pool, flags, &addr); in pdc_alloc_desc()
434 INIT_LIST_HEAD(&desc->tx_list); in pdc_alloc_desc()
435 dma_async_tx_descriptor_init(&desc->txd, chan); in pdc_alloc_desc()
436 desc->txd.tx_submit = pd_tx_submit; in pdc_alloc_desc()
437 desc->txd.flags = DMA_CTRL_ACK; in pdc_alloc_desc()
438 desc->txd.phys = addr; in pdc_alloc_desc()
450 spin_lock(&pd_chan->lock); in pdc_desc_get()
451 list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) { in pdc_desc_get()
453 if (async_tx_test_ack(&desc->txd)) { in pdc_desc_get()
454 list_del(&desc->desc_node); in pdc_desc_get()
458 dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc); in pdc_desc_get()
460 spin_unlock(&pd_chan->lock); in pdc_desc_get()
461 dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i); in pdc_desc_get()
464 ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC); in pdc_desc_get()
466 spin_lock(&pd_chan->lock); in pdc_desc_get()
467 pd_chan->descs_allocated++; in pdc_desc_get()
468 spin_unlock(&pd_chan->lock); in pdc_desc_get()
470 dev_err(chan2dev(&pd_chan->chan), in pdc_desc_get()
482 spin_lock(&pd_chan->lock); in pdc_desc_put()
483 list_splice_init(&desc->tx_list, &pd_chan->free_list); in pdc_desc_put()
484 list_add(&desc->desc_node, &pd_chan->free_list); in pdc_desc_put()
485 spin_unlock(&pd_chan->lock); in pdc_desc_put()
489 static int pd_alloc_chan_resources(struct dma_chan *chan) in pd_alloc_chan_resources() argument
491 struct pch_dma_chan *pd_chan = to_pd_chan(chan); in pd_alloc_chan_resources()
497 dev_dbg(chan2dev(chan), "DMA channel not idle ?\n"); in pd_alloc_chan_resources()
498 return -EIO; in pd_alloc_chan_resources()
501 if (!list_empty(&pd_chan->free_list)) in pd_alloc_chan_resources()
502 return pd_chan->descs_allocated; in pd_alloc_chan_resources()
505 desc = pdc_alloc_desc(chan, GFP_KERNEL); in pd_alloc_chan_resources()
508 dev_warn(chan2dev(chan), in pd_alloc_chan_resources()
513 list_add_tail(&desc->desc_node, &tmp_list); in pd_alloc_chan_resources()
516 spin_lock_irq(&pd_chan->lock); in pd_alloc_chan_resources()
517 list_splice(&tmp_list, &pd_chan->free_list); in pd_alloc_chan_resources()
518 pd_chan->descs_allocated = i; in pd_alloc_chan_resources()
519 dma_cookie_init(chan); in pd_alloc_chan_resources()
520 spin_unlock_irq(&pd_chan->lock); in pd_alloc_chan_resources()
522 pdc_enable_irq(chan, 1); in pd_alloc_chan_resources()
524 return pd_chan->descs_allocated; in pd_alloc_chan_resources()
527 static void pd_free_chan_resources(struct dma_chan *chan) in pd_free_chan_resources() argument
529 struct pch_dma_chan *pd_chan = to_pd_chan(chan); in pd_free_chan_resources()
530 struct pch_dma *pd = to_pd(chan->device); in pd_free_chan_resources()
535 BUG_ON(!list_empty(&pd_chan->active_list)); in pd_free_chan_resources()
536 BUG_ON(!list_empty(&pd_chan->queue)); in pd_free_chan_resources()
538 spin_lock_irq(&pd_chan->lock); in pd_free_chan_resources()
539 list_splice_init(&pd_chan->free_list, &tmp_list); in pd_free_chan_resources()
540 pd_chan->descs_allocated = 0; in pd_free_chan_resources()
541 spin_unlock_irq(&pd_chan->lock); in pd_free_chan_resources()
544 dma_pool_free(pd->pool, desc, desc->txd.phys); in pd_free_chan_resources()
546 pdc_enable_irq(chan, 0); in pd_free_chan_resources()
549 static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie, in pd_tx_status() argument
552 return dma_cookie_status(chan, cookie, txstate); in pd_tx_status()
555 static void pd_issue_pending(struct dma_chan *chan) in pd_issue_pending() argument
557 struct pch_dma_chan *pd_chan = to_pd_chan(chan); in pd_issue_pending()
560 spin_lock(&pd_chan->lock); in pd_issue_pending()
562 spin_unlock(&pd_chan->lock); in pd_issue_pending()
566 static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, in pd_prep_slave_sg() argument
571 struct pch_dma_chan *pd_chan = to_pd_chan(chan); in pd_prep_slave_sg()
572 struct pch_dma_slave *pd_slave = chan->private; in pd_prep_slave_sg()
581 dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n"); in pd_prep_slave_sg()
586 reg = pd_slave->rx_reg; in pd_prep_slave_sg()
588 reg = pd_slave->tx_reg; in pd_prep_slave_sg()
592 pd_chan->dir = direction; in pd_prep_slave_sg()
593 pdc_set_dir(chan); in pd_prep_slave_sg()
601 desc->regs.dev_addr = reg; in pd_prep_slave_sg()
602 desc->regs.mem_addr = sg_dma_address(sg); in pd_prep_slave_sg()
603 desc->regs.size = sg_dma_len(sg); in pd_prep_slave_sg()
604 desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ; in pd_prep_slave_sg()
606 switch (pd_slave->width) { in pd_prep_slave_sg()
608 if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE) in pd_prep_slave_sg()
610 desc->regs.size |= DMA_DESC_WIDTH_1_BYTE; in pd_prep_slave_sg()
613 if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES) in pd_prep_slave_sg()
615 desc->regs.size |= DMA_DESC_WIDTH_2_BYTES; in pd_prep_slave_sg()
618 if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES) in pd_prep_slave_sg()
620 desc->regs.size |= DMA_DESC_WIDTH_4_BYTES; in pd_prep_slave_sg()
629 prev->regs.next |= desc->txd.phys; in pd_prep_slave_sg()
630 list_add_tail(&desc->desc_node, &first->tx_list); in pd_prep_slave_sg()
637 desc->regs.next = DMA_DESC_END_WITH_IRQ; in pd_prep_slave_sg()
639 desc->regs.next = DMA_DESC_END_WITHOUT_IRQ; in pd_prep_slave_sg()
641 first->txd.cookie = -EBUSY; in pd_prep_slave_sg()
642 desc->txd.flags = flags; in pd_prep_slave_sg()
644 return &first->txd; in pd_prep_slave_sg()
647 dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n"); in pd_prep_slave_sg()
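
pd_prep_slave_sg() above takes its device register and transfer width from chan->private rather than from a dma_slave_config, so a client has to hand it a struct pch_dma_slave through the channel-request filter. A rough sketch of that usage, assuming the standard dmaengine client calls; the pch_dma_slave fields themselves (tx_reg/rx_reg/width) are the ones referenced above, and the caller is assumed to have filled them in:

	/* Sketch only -- not part of pch_dma.c.  Needs <linux/dmaengine.h>,
	 * <linux/pch_dma.h> and <linux/scatterlist.h>. */
	static bool pch_dma_filter(struct dma_chan *chan, void *slave)
	{
		chan->private = slave;	/* read back by pd_prep_slave_sg() */
		return true;
	}

	static int example_start_tx(struct pch_dma_slave *slave,
				    struct scatterlist *sgl, unsigned int nents)
	{
		struct dma_async_tx_descriptor *desc;
		struct dma_chan *chan;
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		chan = dma_request_channel(mask, pch_dma_filter, slave);
		if (!chan)
			return -EBUSY;

		desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
					       DMA_PREP_INTERRUPT);
		if (!desc) {
			dma_release_channel(chan);
			return -ENOMEM;
		}

		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
		return 0;
	}
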
652 static int pd_device_terminate_all(struct dma_chan *chan) in pd_device_terminate_all() argument
654 struct pch_dma_chan *pd_chan = to_pd_chan(chan); in pd_device_terminate_all()
658 spin_lock_irq(&pd_chan->lock); in pd_device_terminate_all()
660 pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE); in pd_device_terminate_all()
662 list_splice_init(&pd_chan->active_list, &list); in pd_device_terminate_all()
663 list_splice_init(&pd_chan->queue, &list); in pd_device_terminate_all()
668 spin_unlock_irq(&pd_chan->lock); in pd_device_terminate_all()
679 dev_err(chan2dev(&pd_chan->chan), in pdc_tasklet()
680 "BUG: handle non-idle channel in tasklet\n"); in pdc_tasklet()
684 spin_lock_irqsave(&pd_chan->lock, flags); in pdc_tasklet()
685 if (test_and_clear_bit(0, &pd_chan->err_status)) in pdc_tasklet()
689 spin_unlock_irqrestore(&pd_chan->lock, flags); in pdc_tasklet()
705 dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0); in pd_irq()
707 for (i = 0; i < pd->dma.chancnt; i++) { in pd_irq()
708 pd_chan = &pd->channels[i]; in pd_irq()
713 set_bit(0, &pd_chan->err_status); in pd_irq()
715 tasklet_schedule(&pd_chan->tasklet); in pd_irq()
719 if (sts2 & DMA_STATUS_IRQ(i - 8)) { in pd_irq()
721 set_bit(0, &pd_chan->err_status); in pd_irq()
723 tasklet_schedule(&pd_chan->tasklet); in pd_irq()
741 struct dma_chan *chan, *_c; in pch_dma_save_regs() local
744 pd->regs.dma_ctl0 = dma_readl(pd, CTL0); in pch_dma_save_regs()
745 pd->regs.dma_ctl1 = dma_readl(pd, CTL1); in pch_dma_save_regs()
746 pd->regs.dma_ctl2 = dma_readl(pd, CTL2); in pch_dma_save_regs()
747 pd->regs.dma_ctl3 = dma_readl(pd, CTL3); in pch_dma_save_regs()
749 list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) { in pch_dma_save_regs()
750 pd_chan = to_pd_chan(chan); in pch_dma_save_regs()
752 pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR); in pch_dma_save_regs()
753 pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR); in pch_dma_save_regs()
754 pd->ch_regs[i].size = channel_readl(pd_chan, SIZE); in pch_dma_save_regs()
755 pd->ch_regs[i].next = channel_readl(pd_chan, NEXT); in pch_dma_save_regs()
764 struct dma_chan *chan, *_c; in pch_dma_restore_regs() local
767 dma_writel(pd, CTL0, pd->regs.dma_ctl0); in pch_dma_restore_regs()
768 dma_writel(pd, CTL1, pd->regs.dma_ctl1); in pch_dma_restore_regs()
769 dma_writel(pd, CTL2, pd->regs.dma_ctl2); in pch_dma_restore_regs()
770 dma_writel(pd, CTL3, pd->regs.dma_ctl3); in pch_dma_restore_regs()
772 list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) { in pch_dma_restore_regs()
773 pd_chan = to_pd_chan(chan); in pch_dma_restore_regs()
775 channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr); in pch_dma_restore_regs()
776 channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr); in pch_dma_restore_regs()
777 channel_writel(pd_chan, SIZE, pd->ch_regs[i].size); in pch_dma_restore_regs()
778 channel_writel(pd_chan, NEXT, pd->ch_regs[i].next); in pch_dma_restore_regs()
813 nr_channels = id->driver_data; in pch_dma_probe()
816 return -ENOMEM; in pch_dma_probe()
822 dev_err(&pdev->dev, "Cannot enable PCI device\n"); in pch_dma_probe()
827 dev_err(&pdev->dev, "Cannot find proper base address\n"); in pch_dma_probe()
828 err = -ENODEV; in pch_dma_probe()
834 dev_err(&pdev->dev, "Cannot obtain PCI resources\n"); in pch_dma_probe()
840 dev_err(&pdev->dev, "Cannot set proper DMA config\n"); in pch_dma_probe()
844 regs = pd->membase = pci_iomap(pdev, 1, 0); in pch_dma_probe()
845 if (!pd->membase) { in pch_dma_probe()
846 dev_err(&pdev->dev, "Cannot map MMIO registers\n"); in pch_dma_probe()
847 err = -ENOMEM; in pch_dma_probe()
852 pd->dma.dev = &pdev->dev; in pch_dma_probe()
854 err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd); in pch_dma_probe()
856 dev_err(&pdev->dev, "Failed to request IRQ\n"); in pch_dma_probe()
860 pd->pool = dma_pool_create("pch_dma_desc_pool", &pdev->dev, in pch_dma_probe()
862 if (!pd->pool) { in pch_dma_probe()
863 dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n"); in pch_dma_probe()
864 err = -ENOMEM; in pch_dma_probe()
869 INIT_LIST_HEAD(&pd->dma.channels); in pch_dma_probe()
872 struct pch_dma_chan *pd_chan = &pd->channels[i]; in pch_dma_probe()
874 pd_chan->chan.device = &pd->dma; in pch_dma_probe()
875 dma_cookie_init(&pd_chan->chan); in pch_dma_probe()
877 pd_chan->membase = &regs->desc[i]; in pch_dma_probe()
879 spin_lock_init(&pd_chan->lock); in pch_dma_probe()
881 INIT_LIST_HEAD(&pd_chan->active_list); in pch_dma_probe()
882 INIT_LIST_HEAD(&pd_chan->queue); in pch_dma_probe()
883 INIT_LIST_HEAD(&pd_chan->free_list); in pch_dma_probe()
885 tasklet_setup(&pd_chan->tasklet, pdc_tasklet); in pch_dma_probe()
886 list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels); in pch_dma_probe()
889 dma_cap_zero(pd->dma.cap_mask); in pch_dma_probe()
890 dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask); in pch_dma_probe()
891 dma_cap_set(DMA_SLAVE, pd->dma.cap_mask); in pch_dma_probe()
893 pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources; in pch_dma_probe()
894 pd->dma.device_free_chan_resources = pd_free_chan_resources; in pch_dma_probe()
895 pd->dma.device_tx_status = pd_tx_status; in pch_dma_probe()
896 pd->dma.device_issue_pending = pd_issue_pending; in pch_dma_probe()
897 pd->dma.device_prep_slave_sg = pd_prep_slave_sg; in pch_dma_probe()
898 pd->dma.device_terminate_all = pd_device_terminate_all; in pch_dma_probe()
900 err = dma_async_device_register(&pd->dma); in pch_dma_probe()
902 dev_err(&pdev->dev, "Failed to register DMA device\n"); in pch_dma_probe()
909 dma_pool_destroy(pd->pool); in pch_dma_probe()
911 free_irq(pdev->irq, pd); in pch_dma_probe()
913 pci_iounmap(pdev, pd->membase); in pch_dma_probe()
927 struct dma_chan *chan, *_c; in pch_dma_remove() local
930 dma_async_device_unregister(&pd->dma); in pch_dma_remove()
932 free_irq(pdev->irq, pd); in pch_dma_remove()
934 list_for_each_entry_safe(chan, _c, &pd->dma.channels, in pch_dma_remove()
936 pd_chan = to_pd_chan(chan); in pch_dma_remove()
938 tasklet_kill(&pd_chan->tasklet); in pch_dma_remove()
941 dma_pool_destroy(pd->pool); in pch_dma_remove()
942 pci_iounmap(pdev, pd->membase); in pch_dma_remove()
982 .name = DRV_NAME,