Lines matching references to "c" in the BCM2835 DMA driver (drivers/dma/bcm2835-dma.c), as reported by the source cross-referencer. The number at the start of each line is the line number within that file; the trailing "in ...()" annotation names the enclosing function.
93 struct bcm2835_chan *c; member
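
The "member" hit above is the back-pointer from a descriptor to its owning channel. A plausible layout of the two sides, reconstructed only from the accesses visible in this listing (d->c, d->vd, d->dir, d->cyclic, d->frames, d->cb_list[], and the channel fields referenced below); field order and anything not referenced here are assumptions:

	/* sketch: only fields referenced in this listing are shown */
	struct bcm2835_cb_entry {
		struct bcm2835_dma_cb *cb;	/* CPU address of a control block */
		dma_addr_t paddr;		/* bus address handed to the hardware */
	};

	struct bcm2835_desc {
		struct bcm2835_chan *c;		/* owning channel (the "member" hit above) */
		struct virt_dma_desc vd;	/* virt-dma bookkeeping */
		enum dma_transfer_direction dir;
		unsigned int frames;		/* number of control blocks in cb_list[] */
		bool cyclic;
		struct bcm2835_cb_entry cb_list[];
	};
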
182 static inline size_t bcm2835_dma_max_frame_length(struct bcm2835_chan *c) in bcm2835_dma_max_frame_length() argument
185 return c->is_lite_channel ? MAX_LITE_DMA_LEN : MAX_DMA_LEN; in bcm2835_dma_max_frame_length()
200 static inline struct bcm2835_chan *to_bcm2835_dma_chan(struct dma_chan *c) in to_bcm2835_dma_chan() argument
202 return container_of(c, struct bcm2835_chan, vc.chan); in to_bcm2835_dma_chan()
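
to_bcm2835_dma_chan() is the usual container_of() downcast: the generic struct dma_chan handed out by the dmaengine core is embedded inside the driver's struct bcm2835_chan (at vc.chan, within the virt-dma channel), so the wrapper recovers the enclosing structure from a pointer to the embedded member. A minimal, runnable user-space demonstration of the idiom, with invented names standing in for the kernel types:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct inner { int id; };		/* plays the role of dma_chan */
	struct outer {				/* plays the role of bcm2835_chan */
		int extra;
		struct inner in;		/* embedded, like vc.chan */
	};

	int main(void)
	{
		struct outer o = { .extra = 42, .in = { .id = 7 } };
		struct inner *ip = &o.in;	/* only the inner pointer is passed around */

		/* recover the enclosing object, as to_bcm2835_dma_chan() does */
		struct outer *op = container_of(ip, struct outer, in);
		printf("extra=%d id=%d\n", op->extra, op->in.id);	/* prints: extra=42 id=7 */
		return 0;
	}
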
216 dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb, in bcm2835_dma_free_cb_chain()
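
The dma_pool_free() hit sits in the descriptor teardown loop: every control block in cb_list[] goes back to the per-channel pool before the descriptor itself is freed. A sketch, assuming the frames count bounds the loop and a trailing kfree() as in typical virt-dma drivers:

	static void bcm2835_dma_free_cb_chain(struct bcm2835_desc *desc)
	{
		size_t i;

		/* return every control block to the channel's DMA pool */
		for (i = 0; i < desc->frames; i++)
			dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,
				      desc->cb_list[i].paddr);

		kfree(desc);
	}
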
271 struct bcm2835_chan *c, in bcm2835_dma_count_frames_for_sg() argument
278 size_t plength = bcm2835_dma_max_frame_length(c); in bcm2835_dma_count_frames_for_sg()
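
Frame counting walks the scatterlist and divides each entry by the per-channel maximum frame length, so a lite channel (smaller maximum) needs more control blocks for the same list. A sketch, assuming a round-up division per entry; the real driver uses a small helper for this, which DIV_ROUND_UP stands in for here:

	static inline size_t bcm2835_dma_count_frames_for_sg(
		struct bcm2835_chan *c,
		struct scatterlist *sgl,
		unsigned int sg_len)
	{
		size_t frames = 0;
		struct scatterlist *sgent;
		unsigned int i;
		size_t plength = bcm2835_dma_max_frame_length(c);

		/* one frame per max-frame-length-sized piece of each entry */
		for_each_sg(sgl, sgent, sg_len, i)
			frames += DIV_ROUND_UP(sg_dma_len(sgent), plength);

		return frames;
	}
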
313 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); in bcm2835_dma_create_cb_chain() local
329 d->c = c; in bcm2835_dma_create_cb_chain()
339 cb_entry->cb = dma_pool_alloc(c->cb_pool, gfp, in bcm2835_dma_create_cb_chain()
355 c, control_block, in bcm2835_dma_create_cb_chain()
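
bcm2835_dma_create_cb_chain() allocates the descriptor plus one control block per frame from the channel's dma_pool and links them into a hardware-readable chain; the memcpy, slave_sg and cyclic preps below all funnel through it. A condensed outline of the allocation loop, with the info/extra flag plumbing and per-frame length setup of the real function reduced to the essentials (an assumption-laden sketch, not the full driver code):

	/* condensed outline; the real function also fills in the info/extra
	 * flags, per-period interrupt bits and per-frame transfer lengths */
	d = kzalloc(struct_size(d, cb_list, frames), gfp);
	if (!d)
		return NULL;

	d->c = c;			/* back-pointer used at free time */
	d->dir = direction;
	d->cyclic = cyclic;
	d->frames = frames;

	for (frame = 0; frame < frames; frame++) {
		struct bcm2835_cb_entry *cb_entry = &d->cb_list[frame];

		cb_entry->cb = dma_pool_alloc(c->cb_pool, gfp,
					      &cb_entry->paddr);
		if (!cb_entry->cb)
			goto error_cb;	/* unwind via the free_cb_chain loop above */

		/* chain the previous control block to this one */
		if (frame)
			d->cb_list[frame - 1].cb->next = cb_entry->paddr;
	}
	/* terminate the chain, or loop it back on itself for cyclic transfers */
	d->cb_list[frames - 1].cb->next = cyclic ? d->cb_list[0].paddr : 0;
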
398 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); in bcm2835_dma_fill_cb_chain_with_sg() local
404 max_len = bcm2835_dma_max_frame_length(c); in bcm2835_dma_fill_cb_chain_with_sg()
418 static int bcm2835_dma_abort(struct bcm2835_chan *c) in bcm2835_dma_abort() argument
420 void __iomem *chan_base = c->chan_base; in bcm2835_dma_abort()
440 dev_err(c->vc.chan.device->dev, in bcm2835_dma_abort()
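
bcm2835_dma_abort() stops the channel before a terminate or reset. From the fragments above (it takes the channel, caches chan_base, and can time out with a dev_err()), the shape is: bail out if the channel is already idle, pause it by clearing ACTIVE in CS, poll until outstanding bus activity drains, complain if that never happens, then reset. A speculative sketch; the idle test via the ADDR register and the exact CS bits polled and written are assumptions about this driver version:

	static int bcm2835_dma_abort(struct bcm2835_chan *c)
	{
		void __iomem *chan_base = c->chan_base;
		long timeout = 10000;

		/* a zero control-block address means the channel is idle */
		if (!readl(chan_base + BCM2835_DMA_ADDR))
			return 0;

		/* clear ACTIVE to pause the channel */
		writel(0, chan_base + BCM2835_DMA_CS);

		/* wait for in-flight bus writes to drain */
		while ((readl(chan_base + BCM2835_DMA_CS) &
			BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
			cpu_relax();

		/* a stuck peripheral may never acknowledge its writes */
		if (!timeout)
			dev_err(c->vc.chan.device->dev,
				"failed to complete outstanding writes\n");

		writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
		return 0;
	}
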
447 static void bcm2835_dma_start_desc(struct bcm2835_chan *c) in bcm2835_dma_start_desc() argument
449 struct virt_dma_desc *vd = vchan_next_desc(&c->vc); in bcm2835_dma_start_desc()
453 c->desc = NULL; in bcm2835_dma_start_desc()
459 c->desc = d = to_bcm2835_dma_desc(&vd->tx); in bcm2835_dma_start_desc()
461 writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR); in bcm2835_dma_start_desc()
462 writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS); in bcm2835_dma_start_desc()
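
Starting a transfer is just two register writes: point the channel at the first control block, then set ACTIVE in the control/status register. Reassembled from the hits above, plus the usual empty-queue and list_del handling of virt-dma drivers (the list handling is an assumption):

	static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
	{
		struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
		struct bcm2835_desc *d;

		if (!vd) {
			c->desc = NULL;	/* nothing queued, channel goes idle */
			return;
		}

		list_del(&vd->node);

		c->desc = d = to_bcm2835_dma_desc(&vd->tx);

		/* hand the first control block to the hardware and go */
		writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR);
		writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
	}
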
467 struct bcm2835_chan *c = data; in bcm2835_dma_callback() local
472 if (c->irq_flags & IRQF_SHARED) { in bcm2835_dma_callback()
474 flags = readl(c->chan_base + BCM2835_DMA_CS); in bcm2835_dma_callback()
480 spin_lock_irqsave(&c->vc.lock, flags); in bcm2835_dma_callback()
490 c->chan_base + BCM2835_DMA_CS); in bcm2835_dma_callback()
492 d = c->desc; in bcm2835_dma_callback()
498 } else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) { in bcm2835_dma_callback()
499 vchan_cookie_complete(&c->desc->vd); in bcm2835_dma_callback()
500 bcm2835_dma_start_desc(c); in bcm2835_dma_callback()
504 spin_unlock_irqrestore(&c->vc.lock, flags); in bcm2835_dma_callback()
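
The interrupt handler first disambiguates shared IRQs (on BCM2835 the upper DMA channels share one interrupt line) by checking the INT flag in CS, acknowledges the interrupt, and then either fires the cyclic callback or, when the channel has run its chain to the end (ADDR reads back zero), completes the cookie and starts the next queued descriptor. A reconstruction from the hits above; the exact acknowledge write and the IRQ_NONE/IRQ_HANDLED returns are assumptions:

	static irqreturn_t bcm2835_dma_callback(int irq, void *data)
	{
		struct bcm2835_chan *c = data;
		struct bcm2835_desc *d;
		unsigned long flags;

		/* on a shared line, bail out if this channel didn't interrupt */
		if (c->irq_flags & IRQF_SHARED) {
			flags = readl(c->chan_base + BCM2835_DMA_CS);
			if (!(flags & BCM2835_DMA_INT))
				return IRQ_NONE;
		}

		spin_lock_irqsave(&c->vc.lock, flags);

		/* ack INT; keep ACTIVE set so a cyclic transfer keeps running */
		writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
		       c->chan_base + BCM2835_DMA_CS);

		d = c->desc;
		if (d) {
			if (d->cyclic) {
				vchan_cyclic_callback(&d->vd);
			} else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
				/* chain finished: complete and start the next */
				vchan_cookie_complete(&c->desc->vd);
				bcm2835_dma_start_desc(c);
			}
		}

		spin_unlock_irqrestore(&c->vc.lock, flags);
		return IRQ_HANDLED;
	}
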
511 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); in bcm2835_dma_alloc_chan_resources() local
512 struct device *dev = c->vc.chan.device->dev; in bcm2835_dma_alloc_chan_resources()
514 dev_dbg(dev, "Allocating DMA channel %d\n", c->ch); in bcm2835_dma_alloc_chan_resources()
516 c->cb_pool = dma_pool_create(dev_name(dev), dev, in bcm2835_dma_alloc_chan_resources()
518 if (!c->cb_pool) { in bcm2835_dma_alloc_chan_resources()
523 return request_irq(c->irq_number, bcm2835_dma_callback, in bcm2835_dma_alloc_chan_resources()
524 c->irq_flags, "DMA IRQ", c); in bcm2835_dma_alloc_chan_resources()
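
Per-channel setup creates a dma_pool sized for one hardware control block (so control blocks come out already DMA-mapped and suitably aligned) and then requests the channel's interrupt with the handler above. A sketch, assuming the pool element size is sizeof(struct bcm2835_dma_cb) and -ENOMEM on pool failure:

	static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
	{
		struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
		struct device *dev = c->vc.chan.device->dev;

		dev_dbg(dev, "Allocating DMA channel %d\n", c->ch);

		/* pool of DMA-coherent control blocks for this channel */
		c->cb_pool = dma_pool_create(dev_name(dev), dev,
					     sizeof(struct bcm2835_dma_cb), 0, 0);
		if (!c->cb_pool) {
			dev_err(dev, "unable to allocate descriptor pool\n");
			return -ENOMEM;
		}

		return request_irq(c->irq_number, bcm2835_dma_callback,
				   c->irq_flags, "DMA IRQ", c);
	}
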
529 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); in bcm2835_dma_free_chan_resources() local
531 vchan_free_chan_resources(&c->vc); in bcm2835_dma_free_chan_resources()
532 free_irq(c->irq_number, c); in bcm2835_dma_free_chan_resources()
533 dma_pool_destroy(c->cb_pool); in bcm2835_dma_free_chan_resources()
535 dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch); in bcm2835_dma_free_chan_resources()
570 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); in bcm2835_dma_tx_status() local
579 spin_lock_irqsave(&c->vc.lock, flags); in bcm2835_dma_tx_status()
580 vd = vchan_find_desc(&c->vc, cookie); in bcm2835_dma_tx_status()
584 } else if (c->desc && c->desc->vd.tx.cookie == cookie) { in bcm2835_dma_tx_status()
585 struct bcm2835_desc *d = c->desc; in bcm2835_dma_tx_status()
589 pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD); in bcm2835_dma_tx_status()
591 pos = readl(c->chan_base + BCM2835_DMA_DEST_AD); in bcm2835_dma_tx_status()
600 spin_unlock_irqrestore(&c->vc.lock, flags); in bcm2835_dma_tx_status()
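
Residue reporting distinguishes three cases: the descriptor is still queued (its full size remains), it is the one currently in flight (read the hardware position back and compute what is left), or it is unknown/complete (zero). Reading SOURCE_AD for mem-to-dev and DEST_AD for dev-to-mem makes sense because that side is the one stepping through memory while the other side sits on a fixed FIFO address. A sketch; the bcm2835_dma_desc_size()/bcm2835_dma_desc_size_pos() helper names are assumed from context:

	static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
	{
		struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
		struct virt_dma_desc *vd;
		enum dma_status ret;
		unsigned long flags;

		ret = dma_cookie_status(chan, cookie, txstate);
		if (ret == DMA_COMPLETE || !txstate)
			return ret;

		spin_lock_irqsave(&c->vc.lock, flags);
		vd = vchan_find_desc(&c->vc, cookie);
		if (vd) {
			/* still queued: nothing transferred yet */
			txstate->residue =
				bcm2835_dma_desc_size(to_bcm2835_dma_desc(&vd->tx));
		} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
			struct bcm2835_desc *d = c->desc;
			dma_addr_t pos;

			/* the advancing side tells us how far we got */
			if (d->dir == DMA_MEM_TO_DEV)
				pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD);
			else if (d->dir == DMA_DEV_TO_MEM)
				pos = readl(c->chan_base + BCM2835_DMA_DEST_AD);
			else
				pos = 0;

			txstate->residue = bcm2835_dma_desc_size_pos(d, pos);
		} else {
			txstate->residue = 0;
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);

		return ret;
	}
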
607 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); in bcm2835_dma_issue_pending() local
610 spin_lock_irqsave(&c->vc.lock, flags); in bcm2835_dma_issue_pending()
611 if (vchan_issue_pending(&c->vc) && !c->desc) in bcm2835_dma_issue_pending()
612 bcm2835_dma_start_desc(c); in bcm2835_dma_issue_pending()
614 spin_unlock_irqrestore(&c->vc.lock, flags); in bcm2835_dma_issue_pending()
621 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); in bcm2835_dma_prep_dma_memcpy() local
625 size_t max_len = bcm2835_dma_max_frame_length(c); in bcm2835_dma_prep_dma_memcpy()
642 return vchan_tx_prep(&c->vc, &d->vd, flags); in bcm2835_dma_prep_dma_memcpy()
651 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); in bcm2835_dma_prep_slave_sg() local
664 if (c->dreq != 0) in bcm2835_dma_prep_slave_sg()
665 info |= BCM2835_DMA_PER_MAP(c->dreq); in bcm2835_dma_prep_slave_sg()
668 if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) in bcm2835_dma_prep_slave_sg()
670 src = c->cfg.src_addr; in bcm2835_dma_prep_slave_sg()
673 if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) in bcm2835_dma_prep_slave_sg()
675 dst = c->cfg.dst_addr; in bcm2835_dma_prep_slave_sg()
680 frames = bcm2835_dma_count_frames_for_sg(c, sgl, sg_len); in bcm2835_dma_prep_slave_sg()
694 return vchan_tx_prep(&c->vc, &d->vd, flags); in bcm2835_dma_prep_slave_sg()
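
The slave_sg prep wires the peripheral's DREQ line into the control-block info word (so the hardware paces itself on the peripheral), takes the FIFO address from the dma_slave_config for whichever side is the device and rejects anything but 32-bit bus width, counts frames for the scatterlist, builds the control-block chain, fills it from the scatterlist (the bcm2835_dma_fill_cb_chain_with_sg() hits above), and hands the result to virt-dma. A condensed sketch of the mem-to-dev path; the exact info bit names and the chain-builder signature are assumptions:

	/* condensed outline of the DMA_MEM_TO_DEV path */
	if (c->dreq != 0)
		info |= BCM2835_DMA_PER_MAP(c->dreq);	/* pace on the peripheral */

	if (direction == DMA_MEM_TO_DEV) {
		if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
			return NULL;		/* controller only does 32-bit FIFO accesses */
		dst = c->cfg.dst_addr;		/* fixed FIFO bus address */
		info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;
	}

	frames = bcm2835_dma_count_frames_for_sg(c, sgl, sg_len);

	d = bcm2835_dma_create_cb_chain(chan, direction, false, info, extra,
					frames, src, dst, 0, 0, GFP_NOWAIT);
	if (!d)
		return NULL;

	bcm2835_dma_fill_cb_chain_with_sg(chan, direction, d->cb_list,
					  sgl, sg_len);

	return vchan_tx_prep(&c->vc, &d->vd, flags);
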
702 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); in bcm2835_dma_prep_dma_cyclic() local
707 size_t max_len = bcm2835_dma_max_frame_length(c); in bcm2835_dma_prep_dma_cyclic()
732 if (c->dreq != 0) in bcm2835_dma_prep_dma_cyclic()
733 info |= BCM2835_DMA_PER_MAP(c->dreq); in bcm2835_dma_prep_dma_cyclic()
736 if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) in bcm2835_dma_prep_dma_cyclic()
738 src = c->cfg.src_addr; in bcm2835_dma_prep_dma_cyclic()
742 if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) in bcm2835_dma_prep_dma_cyclic()
744 dst = c->cfg.dst_addr; in bcm2835_dma_prep_dma_cyclic()
770 return vchan_tx_prep(&c->vc, &d->vd, flags); in bcm2835_dma_prep_dma_cyclic()
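
The cyclic prep repeats the same DREQ and bus-width checks per direction and then splits buf_len into period_len-sized chunks, with an interrupt raised per period. From the client side this is the standard dmaengine cyclic pattern; a hypothetical audio-style example (callback and context names invented):

	/* hypothetical client: stream a ring buffer to a peripheral FIFO */
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;

	desc->callback = my_period_elapsed;	/* invented callback, runs per period */
	desc->callback_param = my_ctx;		/* invented context pointer */

	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);	/* ends up in bcm2835_dma_issue_pending() */
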
776 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); in bcm2835_dma_slave_config() local
786 c->cfg = *cfg; in bcm2835_dma_slave_config()
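
bcm2835_dma_slave_config() simply snapshots the client's dma_slave_config into the channel; the addresses and widths are validated later, in the prep routines (the DMA_SLAVE_BUSWIDTH_4_BYTES checks above). A client would fill it in like this (the FIFO address is a hypothetical placeholder):

	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_bus_addr,	/* hypothetical peripheral FIFO */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,	/* the only width the prep accepts */
		.dst_maxburst	= 2,
	};
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);	/* lands in bcm2835_dma_slave_config() */
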
793 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); in bcm2835_dma_terminate_all() local
794 struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device); in bcm2835_dma_terminate_all()
798 spin_lock_irqsave(&c->vc.lock, flags); in bcm2835_dma_terminate_all()
802 list_del_init(&c->node); in bcm2835_dma_terminate_all()
806 if (c->desc) { in bcm2835_dma_terminate_all()
807 bcm2835_dma_desc_free(&c->desc->vd); in bcm2835_dma_terminate_all()
808 c->desc = NULL; in bcm2835_dma_terminate_all()
809 bcm2835_dma_abort(c); in bcm2835_dma_terminate_all()
812 vchan_get_all_descriptors(&c->vc, &head); in bcm2835_dma_terminate_all()
813 spin_unlock_irqrestore(&c->vc.lock, flags); in bcm2835_dma_terminate_all()
814 vchan_dma_desc_free_list(&c->vc, &head); in bcm2835_dma_terminate_all()
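
terminate_all takes the channel off the device-level scheduling list (under the device lock, nested inside the channel lock), frees and aborts whatever is in flight, then drains every still-queued descriptor through virt-dma, with the actual freeing done outside the lock. Reassembled from the hits above; the nested spin_lock(&d->lock) around list_del_init() is inferred from the device pointer fetched at the top:

	static int bcm2835_dma_terminate_all(struct dma_chan *chan)
	{
		struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
		struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
		unsigned long flags;
		LIST_HEAD(head);

		spin_lock_irqsave(&c->vc.lock, flags);

		/* stop this channel from being (re)scheduled */
		spin_lock(&d->lock);
		list_del_init(&c->node);
		spin_unlock(&d->lock);

		/* kill the in-flight descriptor, then halt the hardware */
		if (c->desc) {
			bcm2835_dma_desc_free(&c->desc->vd);
			c->desc = NULL;
			bcm2835_dma_abort(c);
		}

		vchan_get_all_descriptors(&c->vc, &head);
		spin_unlock_irqrestore(&c->vc.lock, flags);
		/* free the queued descriptors outside the lock */
		vchan_dma_desc_free_list(&c->vc, &head);

		return 0;
	}
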
822 struct bcm2835_chan *c; in bcm2835_dma_chan_init() local
824 c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL); in bcm2835_dma_chan_init()
825 if (!c) in bcm2835_dma_chan_init()
828 c->vc.desc_free = bcm2835_dma_desc_free; in bcm2835_dma_chan_init()
829 vchan_init(&c->vc, &d->ddev); in bcm2835_dma_chan_init()
830 INIT_LIST_HEAD(&c->node); in bcm2835_dma_chan_init()
832 c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id); in bcm2835_dma_chan_init()
833 c->ch = chan_id; in bcm2835_dma_chan_init()
834 c->irq_number = irq; in bcm2835_dma_chan_init()
835 c->irq_flags = irq_flags; in bcm2835_dma_chan_init()
838 if (readl(c->chan_base + BCM2835_DMA_DEBUG) & in bcm2835_dma_chan_init()
840 c->is_lite_channel = true; in bcm2835_dma_chan_init()
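
Channel setup ties the virt-dma channel into the device, records the per-channel register window, channel id and IRQ, and probes the DEBUG register for the LITE bit; lite channels then get the smaller frame limit (just under 64 KiB versus the full channels' much larger maximum) used by bcm2835_dma_max_frame_length() above. Reassembled from the hits, with the return values assumed:

	static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id,
					 int irq, unsigned int irq_flags)
	{
		struct bcm2835_chan *c;

		c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL);
		if (!c)
			return -ENOMEM;

		c->vc.desc_free = bcm2835_dma_desc_free;
		vchan_init(&c->vc, &d->ddev);
		INIT_LIST_HEAD(&c->node);

		c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
		c->ch = chan_id;
		c->irq_number = irq;
		c->irq_flags = irq_flags;

		/* lite channels advertise themselves in the DEBUG register */
		if (readl(c->chan_base + BCM2835_DMA_DEBUG) &
		    BCM2835_DMA_DEBUG_LITE)
			c->is_lite_channel = true;

		return 0;
	}
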
847 struct bcm2835_chan *c, *next; in bcm2835_dma_free() local
849 list_for_each_entry_safe(c, next, &od->ddev.channels, in bcm2835_dma_free()
851 list_del(&c->vc.chan.device_node); in bcm2835_dma_free()
852 tasklet_kill(&c->vc.task); in bcm2835_dma_free()
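
Teardown walks the device's channel list with the _safe iterator (entries are removed while iterating) and kills each channel's virt-dma tasklet so no completion work can run after the device goes away. A sketch of the loop:

	static void bcm2835_dma_free(struct bcm2835_dmadev *od)
	{
		struct bcm2835_chan *c, *next;

		list_for_each_entry_safe(c, next, &od->ddev.channels,
					 vc.chan.device_node) {
			list_del(&c->vc.chan.device_node);
			/* make sure the completion tasklet can't run anymore */
			tasklet_kill(&c->vc.task);
		}
	}
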