Lines matching refs:bchan (every line in the driver that references the per-channel state pointer bchan)
313 static void bam_reset_channel(struct bam_chan *bchan) in bam_reset_channel() argument
315 struct bam_device *bdev = bchan->bdev; in bam_reset_channel()
317 lockdep_assert_held(&bchan->vc.lock); in bam_reset_channel()
320 writel_relaxed(1, bdev->regs + BAM_P_RST(bchan->id)); in bam_reset_channel()
321 writel_relaxed(0, bdev->regs + BAM_P_RST(bchan->id)); in bam_reset_channel()
327 bchan->initialized = 0; in bam_reset_channel()
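
A minimal reconstruction of the reset path from the matches above (lines 313-327). The BAM_P_RST() register macro and the struct fields come from the listing; everything else is an assumption, not necessarily the driver's exact code:

static void bam_reset_channel(struct bam_chan *bchan)
{
	struct bam_device *bdev = bchan->bdev;

	/* callers must already hold the virt-dma channel lock */
	lockdep_assert_held(&bchan->vc.lock);

	/* pulse the per-pipe reset bit: assert with 1, deassert with 0 */
	writel_relaxed(1, bdev->regs + BAM_P_RST(bchan->id));
	writel_relaxed(0, bdev->regs + BAM_P_RST(bchan->id));

	/* force bam_chan_init_hw() to reprogram the pipe before reuse */
	bchan->initialized = 0;
}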
336 static void bam_chan_init_hw(struct bam_chan *bchan, in bam_chan_init_hw() argument
339 struct bam_device *bdev = bchan->bdev; in bam_chan_init_hw()
343 bam_reset_channel(bchan); in bam_chan_init_hw()
349 writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)), in bam_chan_init_hw()
350 bdev->regs + BAM_P_DESC_FIFO_ADDR(bchan->id)); in bam_chan_init_hw()
352 BAM_P_FIFO_SIZES(bchan->id)); in bam_chan_init_hw()
355 writel_relaxed(P_DEFAULT_IRQS_EN, bdev->regs + BAM_P_IRQ_EN(bchan->id)); in bam_chan_init_hw()
359 val |= BIT(bchan->id); in bam_chan_init_hw()
370 writel_relaxed(val, bdev->regs + BAM_P_CTRL(bchan->id)); in bam_chan_init_hw()
372 bchan->initialized = 1; in bam_chan_init_hw()
375 bchan->head = 0; in bam_chan_init_hw()
376 bchan->tail = 0; in bam_chan_init_hw()
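
The init path reprograms the pipe after a reset: it publishes the descriptor FIFO's bus address and size, unmasks the per-pipe interrupts, enables the pipe, and zeroes the software ring indices. A sketch assembled from lines 336-376; the direction parameter type is inferred from the call at line 796, the P_EN/P_SYS_MODE/P_DIRECTION control bits are assumptions, and the controller-level IRQ-mask update at line 359 is elided:

static void bam_chan_init_hw(struct bam_chan *bchan,
			     enum dma_transfer_direction dir)
{
	struct bam_device *bdev = bchan->bdev;
	u32 val;

	/* start from a known-clean pipe */
	bam_reset_channel(bchan);

	/* tell the hardware where the descriptor ring lives and its size */
	writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
		       bdev->regs + BAM_P_DESC_FIFO_ADDR(bchan->id));
	writel_relaxed(BAM_DESC_FIFO_SIZE,
		       bdev->regs + BAM_P_FIFO_SIZES(bchan->id));

	/* unmask the per-pipe interrupt sources the driver services */
	writel_relaxed(P_DEFAULT_IRQS_EN,
		       bdev->regs + BAM_P_IRQ_EN(bchan->id));

	/*
	 * Line 359 also ORs BIT(bchan->id) into a controller-level IRQ
	 * mask; that register's name is not visible in the matches, so
	 * the read-modify-write is omitted here.
	 *
	 * ASSUMPTION: the control-word bit names below are not in the
	 * matches; line 370 writes some such word to BAM_P_CTRL.
	 */
	val = P_EN | P_SYS_MODE;
	if (dir == DMA_DEV_TO_MEM)
		val |= P_DIRECTION;
	writel_relaxed(val, bdev->regs + BAM_P_CTRL(bchan->id));

	bchan->initialized = 1;

	/* the software view of the circular FIFO starts out empty */
	bchan->head = 0;
	bchan->tail = 0;
}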
387 struct bam_chan *bchan = to_bam_chan(chan); in bam_alloc_chan() local
388 struct bam_device *bdev = bchan->bdev; in bam_alloc_chan()
390 if (bchan->fifo_virt) in bam_alloc_chan()
394 bchan->fifo_virt = dma_alloc_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, in bam_alloc_chan()
395 &bchan->fifo_phys, GFP_KERNEL); in bam_alloc_chan()
397 if (!bchan->fifo_virt) { in bam_alloc_chan()
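
Channel allocation only has to provide the descriptor ring. A sketch around lines 387-397; the early-return value, error message, and return convention are assumptions. Write-combined memory keeps the CPU's descriptor writes cheap while still being device-visible once the doorbell is rung:

static int bam_alloc_chan(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;

	/* ring already allocated on a previous alloc/free cycle */
	if (bchan->fifo_virt)
		return 0;

	/* allocate the FIFO that bam_chan_init_hw() points the pipe at */
	bchan->fifo_virt = dma_alloc_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
						  &bchan->fifo_phys, GFP_KERNEL);
	if (!bchan->fifo_virt) {
		dev_err(bdev->dev, "Failed to allocate desc fifo\n");
		return -ENOMEM;
	}

	return 0;
}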
414 struct bam_chan *bchan = to_bam_chan(chan); in bam_free_chan() local
415 struct bam_device *bdev = bchan->bdev; in bam_free_chan()
421 if (bchan->curr_txd) { in bam_free_chan()
422 dev_err(bchan->bdev->dev, "Cannot free busy channel\n"); in bam_free_chan()
426 spin_lock_irqsave(&bchan->vc.lock, flags); in bam_free_chan()
427 bam_reset_channel(bchan); in bam_free_chan()
428 spin_unlock_irqrestore(&bchan->vc.lock, flags); in bam_free_chan()
430 dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt, in bam_free_chan()
431 bchan->fifo_phys); in bam_free_chan()
432 bchan->fifo_virt = NULL; in bam_free_chan()
436 val &= ~BIT(bchan->id); in bam_free_chan()
440 writel_relaxed(0, bdev->regs + BAM_P_IRQ_EN(bchan->id)); in bam_free_chan()
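
Teardown refuses to free a busy channel, resets the pipe under the lock, releases the ring, and masks the pipe's interrupts. A sketch from lines 414-440; the controller-level mask register name and the bdev->ee field are assumptions, since only the read-modify-write of BIT(bchan->id) at line 436 is visible:

static void bam_free_chan(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	u32 val;
	unsigned long flags;

	/* a transaction is still in flight; freeing now would corrupt it */
	if (bchan->curr_txd) {
		dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
		return;
	}

	spin_lock_irqsave(&bchan->vc.lock, flags);
	bam_reset_channel(bchan);
	spin_unlock_irqrestore(&bchan->vc.lock, flags);

	dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
			      bchan->fifo_phys);
	bchan->fifo_virt = NULL;

	/* drop this pipe from the controller IRQ mask (names assumed) */
	val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
	val &= ~BIT(bchan->id);
	writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));

	/* mask all per-pipe interrupts */
	writel_relaxed(0, bdev->regs + BAM_P_IRQ_EN(bchan->id));
}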
451 static void bam_slave_config(struct bam_chan *bchan, in bam_slave_config() argument
454 memcpy(&bchan->slave, cfg, sizeof(*cfg)); in bam_slave_config()
455 bchan->reconfigure = 1; in bam_slave_config()
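
Lines 454-455 are the whole body: the client's config is only cached here and applied lazily by bam_apply_new_config(), which is why a reconfigure flag exists at all. For completeness, as a sketch:

static void bam_slave_config(struct bam_chan *bchan,
			     struct dma_slave_config *cfg)
{
	/* cache the config; it takes effect at the next transfer start */
	memcpy(&bchan->slave, cfg, sizeof(*cfg));
	bchan->reconfigure = 1;
}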
473 struct bam_chan *bchan = to_bam_chan(chan); in bam_prep_slave_sg() local
474 struct bam_device *bdev = bchan->bdev; in bam_prep_slave_sg()
533 return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags); in bam_prep_slave_sg()
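
Only the locals and the final hand-off of bam_prep_slave_sg() appear in the matches, so the skeleton below is assumption-heavy: the allocation size, the per-entry loop, and the desc[]/addr field names are guesses. Only to_bam_chan() and the vchan_tx_prep() hand-off at line 533 are visible:

static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_async_desc *async_desc;
	struct scatterlist *sg;
	unsigned int i;

	/* GFP_NOWAIT: prep callbacks may be invoked in atomic context */
	async_desc = kzalloc(sizeof(*async_desc) +
			     sg_len * sizeof(struct bam_desc_hw), GFP_NOWAIT);
	if (!async_desc)
		return NULL;

	async_desc->num_desc = sg_len;
	async_desc->dir = direction;
	async_desc->curr_desc = async_desc->desc;	/* ->desc[] assumed */

	/* one hardware descriptor per mapped scatterlist entry */
	for_each_sg(sgl, sg, sg_len, i) {
		async_desc->curr_desc[i].addr = sg_dma_address(sg); /* field assumed */
		async_desc->curr_desc[i].size = sg_dma_len(sg);
	}

	/* line 533: hand off to virt-dma; the cookie is assigned at submit */
	return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
}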
548 static void bam_dma_terminate_all(struct bam_chan *bchan) in bam_dma_terminate_all() argument
554 spin_lock_irqsave(&bchan->vc.lock, flag); in bam_dma_terminate_all()
555 if (bchan->curr_txd) { in bam_dma_terminate_all()
556 list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued); in bam_dma_terminate_all()
557 bchan->curr_txd = NULL; in bam_dma_terminate_all()
560 vchan_get_all_descriptors(&bchan->vc, &head); in bam_dma_terminate_all()
561 spin_unlock_irqrestore(&bchan->vc.lock, flag); in bam_dma_terminate_all()
563 vchan_dma_desc_free_list(&bchan->vc, &head); in bam_dma_terminate_all()
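
Terminate has one subtlety the matches show clearly: the in-flight descriptor is put back on desc_issued first, so vchan_get_all_descriptors() collects it along with everything still queued, and the actual freeing happens only after the lock is dropped, per virt-dma convention. Reconstructed from lines 548-563:

static void bam_dma_terminate_all(struct bam_chan *bchan)
{
	unsigned long flag;
	LIST_HEAD(head);

	/* remove all transactions, including the one in flight */
	spin_lock_irqsave(&bchan->vc.lock, flag);
	if (bchan->curr_txd) {
		/* requeue it so the collection below picks it up too */
		list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued);
		bchan->curr_txd = NULL;
	}

	vchan_get_all_descriptors(&bchan->vc, &head);
	spin_unlock_irqrestore(&bchan->vc.lock, flag);

	/* free outside the lock, keeping the critical section short */
	vchan_dma_desc_free_list(&bchan->vc, &head);
}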
578 struct bam_chan *bchan = to_bam_chan(chan); in bam_control() local
579 struct bam_device *bdev = bchan->bdev; in bam_control()
585 spin_lock_irqsave(&bchan->vc.lock, flag); in bam_control()
586 writel_relaxed(1, bdev->regs + BAM_P_HALT(bchan->id)); in bam_control()
587 bchan->paused = 1; in bam_control()
588 spin_unlock_irqrestore(&bchan->vc.lock, flag); in bam_control()
592 spin_lock_irqsave(&bchan->vc.lock, flag); in bam_control()
593 writel_relaxed(0, bdev->regs + BAM_P_HALT(bchan->id)); in bam_control()
594 bchan->paused = 0; in bam_control()
595 spin_unlock_irqrestore(&bchan->vc.lock, flag); in bam_control()
599 bam_dma_terminate_all(bchan); in bam_control()
603 spin_lock_irqsave(&bchan->vc.lock, flag); in bam_control()
604 bam_slave_config(bchan, (struct dma_slave_config *)arg); in bam_control()
605 spin_unlock_irqrestore(&bchan->vc.lock, flag); in bam_control()
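
This driver predates the dedicated device_pause/device_resume/device_terminate_all callbacks, so everything is multiplexed through one device_control hook. Pause and resume are simply 1/0 writes to the pipe's halt register. A sketch from lines 578-605; the switch framing and the -ENXIO default are assumptions:

static int bam_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	int ret = 0;
	unsigned long flag;

	switch (cmd) {
	case DMA_PAUSE:
		/* halt the pipe; the hardware keeps its ring position */
		spin_lock_irqsave(&bchan->vc.lock, flag);
		writel_relaxed(1, bdev->regs + BAM_P_HALT(bchan->id));
		bchan->paused = 1;
		spin_unlock_irqrestore(&bchan->vc.lock, flag);
		break;
	case DMA_RESUME:
		spin_lock_irqsave(&bchan->vc.lock, flag);
		writel_relaxed(0, bdev->regs + BAM_P_HALT(bchan->id));
		bchan->paused = 0;
		spin_unlock_irqrestore(&bchan->vc.lock, flag);
		break;
	case DMA_TERMINATE_ALL:
		bam_dma_terminate_all(bchan);
		break;
	case DMA_SLAVE_CONFIG:
		spin_lock_irqsave(&bchan->vc.lock, flag);
		bam_slave_config(bchan, (struct dma_slave_config *)arg);
		spin_unlock_irqrestore(&bchan->vc.lock, flag);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}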
636 struct bam_chan *bchan = &bdev->channels[i]; in process_channel_irqs() local
648 spin_lock_irqsave(&bchan->vc.lock, flags); in process_channel_irqs()
649 async_desc = bchan->curr_txd; in process_channel_irqs()
654 bchan->curr_txd = NULL; in process_channel_irqs()
657 bchan->head += async_desc->xfer_len; in process_channel_irqs()
658 bchan->head %= MAX_DESCRIPTORS; in process_channel_irqs()
669 &bchan->vc.desc_issued); in process_channel_irqs()
672 spin_unlock_irqrestore(&bchan->vc.lock, flags); in process_channel_irqs()
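
Completion handling: the current transaction is detached, the software head index advances by the number of descriptors the pipe consumed (modulo MAX_DESCRIPTORS, since the ring is circular), and a transaction too large to fit in the FIFO in one pass goes back on desc_issued for another round. A sketch of the locked region at lines 648-672; the pipe-status decoding at lines 636-647 is elided, and the leftover-work test is an assumption:

	struct bam_async_desc *async_desc;
	unsigned long flags;

	spin_lock_irqsave(&bchan->vc.lock, flags);
	async_desc = bchan->curr_txd;
	if (async_desc) {
		/* the hardware is done with this slice of the transaction */
		bchan->curr_txd = NULL;

		/* retire the consumed descriptors from the software ring */
		bchan->head += async_desc->xfer_len;
		bchan->head %= MAX_DESCRIPTORS;

		/*
		 * The elided lines 659-668 presumably subtract xfer_len
		 * from num_desc and advance curr_desc; if descriptors
		 * remain, the transaction is requeued (line 669).
		 */
		if (async_desc->num_desc)
			list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
		else
			vchan_cookie_complete(&async_desc->vd);
	}
	spin_unlock_irqrestore(&bchan->vc.lock, flags);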
718 struct bam_chan *bchan = to_bam_chan(chan); in bam_tx_status() local
730 return bchan->paused ? DMA_PAUSED : ret; in bam_tx_status()
732 spin_lock_irqsave(&bchan->vc.lock, flags); in bam_tx_status()
733 vd = vchan_find_desc(&bchan->vc, cookie); in bam_tx_status()
736 else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie) in bam_tx_status()
737 for (i = 0; i < bchan->curr_txd->num_desc; i++) in bam_tx_status()
738 residue += bchan->curr_txd->curr_desc[i].size; in bam_tx_status()
740 spin_unlock_irqrestore(&bchan->vc.lock, flags); in bam_tx_status()
744 if (ret == DMA_IN_PROGRESS && bchan->paused) in bam_tx_status()
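
Residue reporting distinguishes three cases: the descriptor is still on a virt-dma list (the full length remains), it is in flight (sum the sizes of the descriptors not yet executed), or it is already complete. A paused channel reports DMA_PAUSED instead of DMA_IN_PROGRESS. Sketch from lines 718-744; the length field used for the queued case is an assumption:

static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	size_t residue = 0;
	unsigned int i;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	/* no state requested: only the paused override matters */
	if (!txstate)
		return bchan->paused ? DMA_PAUSED : ret;

	spin_lock_irqsave(&bchan->vc.lock, flags);
	vd = vchan_find_desc(&bchan->vc, cookie);
	if (vd)
		/* still queued: nothing has run yet (->length assumed) */
		residue = container_of(vd, struct bam_async_desc, vd)->length;
	else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie)
		/* in flight: count the descriptors still to be executed */
		for (i = 0; i < bchan->curr_txd->num_desc; i++)
			residue += bchan->curr_txd->curr_desc[i].size;
	spin_unlock_irqrestore(&bchan->vc.lock, flags);

	dma_set_residue(txstate, residue);

	if (ret == DMA_IN_PROGRESS && bchan->paused)
		ret = DMA_PAUSED;

	return ret;
}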
755 static void bam_apply_new_config(struct bam_chan *bchan, in bam_apply_new_config() argument
758 struct bam_device *bdev = bchan->bdev; in bam_apply_new_config()
762 maxburst = bchan->slave.src_maxburst; in bam_apply_new_config()
764 maxburst = bchan->slave.dst_maxburst; in bam_apply_new_config()
768 bchan->reconfigure = 0; in bam_apply_new_config()
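
The cached slave config is applied here, just before a transfer starts: the burst size is taken from src_maxburst or dst_maxburst depending on the transfer direction, programmed into the hardware, and the reconfigure flag cleared. Sketch from lines 755-768; the threshold register name is an assumption, as the write at line 766 is not in the matches:

static void bam_apply_new_config(struct bam_chan *bchan,
				 enum dma_transfer_direction dir)
{
	struct bam_device *bdev = bchan->bdev;
	u32 maxburst;

	/* pick the burst size for the active direction */
	if (dir == DMA_DEV_TO_MEM)
		maxburst = bchan->slave.src_maxburst;
	else
		maxburst = bchan->slave.dst_maxburst;

	/* ASSUMPTION: register name; line 766 programs the burst size */
	writel_relaxed(maxburst, bdev->regs + BAM_DESC_CNT_TRSHLD);

	bchan->reconfigure = 0;	/* hardware now matches the cached config */
}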
775 static void bam_start_dma(struct bam_chan *bchan) in bam_start_dma() argument
777 struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc); in bam_start_dma()
778 struct bam_device *bdev = bchan->bdev; in bam_start_dma()
781 struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt, in bam_start_dma()
784 lockdep_assert_held(&bchan->vc.lock); in bam_start_dma()
792 bchan->curr_txd = async_desc; in bam_start_dma()
795 if (!bchan->initialized) in bam_start_dma()
796 bam_chan_init_hw(bchan, async_desc->dir); in bam_start_dma()
799 if (bchan->reconfigure) in bam_start_dma()
800 bam_apply_new_config(bchan, async_desc->dir); in bam_start_dma()
802 desc = bchan->curr_txd->curr_desc; in bam_start_dma()
815 if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) { in bam_start_dma()
816 u32 partial = MAX_DESCRIPTORS - bchan->tail; in bam_start_dma()
818 memcpy(&fifo[bchan->tail], desc, in bam_start_dma()
823 memcpy(&fifo[bchan->tail], desc, in bam_start_dma()
827 bchan->tail += async_desc->xfer_len; in bam_start_dma()
828 bchan->tail %= MAX_DESCRIPTORS; in bam_start_dma()
832 writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw), in bam_start_dma()
833 bdev->regs + BAM_P_EVNT_REG(bchan->id)); in bam_start_dma()
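
The fill logic at lines 815-833 is the heart of the driver: descriptors are memcpy'd into the write-combined ring at the tail index, split into two copies when they would run past the end of the ring, and the hardware is kicked by writing the new tail byte offset to the pipe's event register. A reconstructed fragment:

	/* ring base, aligned as the hardware requires (line 781) */
	struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
					     sizeof(struct bam_desc_hw));
	struct bam_desc_hw *desc = bchan->curr_txd->curr_desc;

	if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
		/* would run off the end: copy tail..end, then wrap to 0 */
		u32 partial = MAX_DESCRIPTORS - bchan->tail;

		memcpy(&fifo[bchan->tail], desc,
		       partial * sizeof(struct bam_desc_hw));
		memcpy(fifo, &desc[partial],
		       (async_desc->xfer_len - partial) *
		       sizeof(struct bam_desc_hw));
	} else {
		memcpy(&fifo[bchan->tail], desc,
		       async_desc->xfer_len * sizeof(struct bam_desc_hw));
	}

	bchan->tail += async_desc->xfer_len;
	bchan->tail %= MAX_DESCRIPTORS;

	/* doorbell: the byte offset one past the last valid descriptor */
	writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
		       bdev->regs + BAM_P_EVNT_REG(bchan->id));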
845 struct bam_chan *bchan; in dma_tasklet() local
851 bchan = &bdev->channels[i]; in dma_tasklet()
852 spin_lock_irqsave(&bchan->vc.lock, flags); in dma_tasklet()
854 if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd) in dma_tasklet()
855 bam_start_dma(bchan); in dma_tasklet()
856 spin_unlock_irqrestore(&bchan->vc.lock, flags); in dma_tasklet()
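
The tasklet is the restart path after interrupt processing: every channel that has issued descriptors but no transaction in flight gets started. A sketch of the loop at lines 845-856; bdev->num_channels is an assumed field name:

	struct bam_chan *bchan;
	unsigned long flags;
	unsigned int i;

	/* go through the channels and kick off any idle ones with work */
	for (i = 0; i < bdev->num_channels; i++) {
		bchan = &bdev->channels[i];
		spin_lock_irqsave(&bchan->vc.lock, flags);

		if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd)
			bam_start_dma(bchan);

		spin_unlock_irqrestore(&bchan->vc.lock, flags);
	}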
868 struct bam_chan *bchan = to_bam_chan(chan); in bam_issue_pending() local
871 spin_lock_irqsave(&bchan->vc.lock, flags); in bam_issue_pending()
874 if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd) in bam_issue_pending()
875 bam_start_dma(bchan); in bam_issue_pending()
877 spin_unlock_irqrestore(&bchan->vc.lock, flags); in bam_issue_pending()
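
issue_pending uses the same idle gate as the tasklet: vchan_issue_pending() moves submitted descriptors onto desc_issued, and the hardware is only kicked when no transaction is current. Reconstructed from lines 868-877:

static void bam_issue_pending(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&bchan->vc.lock, flags);

	/* start the transfer only if nothing is already in flight */
	if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd)
		bam_start_dma(bchan);

	spin_unlock_irqrestore(&bchan->vc.lock, flags);
}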
962 static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan, in bam_channel_init() argument
965 bchan->id = index; in bam_channel_init()
966 bchan->bdev = bdev; in bam_channel_init()
968 vchan_init(&bchan->vc, &bdev->common); in bam_channel_init()
969 bchan->vc.desc_free = bam_dma_free_desc; in bam_channel_init()
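
Per-channel setup just wires the channel into the virt-dma core: vchan_init() registers the channel with the dmaengine device and initializes the vc lock and descriptor lists that everything above relies on. Reconstructed from lines 962-969; the index parameter type is assumed:

static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
			     u32 index)
{
	bchan->id = index;
	bchan->bdev = bdev;

	vchan_init(&bchan->vc, &bdev->common);

	/* virt-dma calls this to free descriptors once they retire */
	bchan->vc.desc_free = bam_dma_free_desc;
}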