Lines Matching refs:chan

155 struct nbpf_channel *chan; member
237 struct nbpf_channel chan[]; member
305 static inline u32 nbpf_chan_read(struct nbpf_channel *chan, in nbpf_chan_read() argument
308 u32 data = ioread32(chan->base + offset); in nbpf_chan_read()
309 dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n", in nbpf_chan_read()
310 __func__, chan->base, offset, data); in nbpf_chan_read()
314 static inline void nbpf_chan_write(struct nbpf_channel *chan, in nbpf_chan_write() argument
317 iowrite32(data, chan->base + offset); in nbpf_chan_write()
318 dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n", in nbpf_chan_write()
319 __func__, chan->base, offset, data); in nbpf_chan_write()
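
These matches appear to come from drivers/dma/nbpfaxi.c, the Linux NBPF DMA engine driver. The two helpers above funnel all per-channel MMIO through one read and one write routine, each with dev_dbg() tracing. A sketch reconstructing them around the matched lines 305-319; the parameter lists and the return statement are not in the matches and are assumptions:

static inline u32 nbpf_chan_read(struct nbpf_channel *chan,
				 unsigned int offset)
{
	/* All channel registers live at chan->base plus a fixed offset */
	u32 data = ioread32(chan->base + offset);
	dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, chan->base, offset, data);
	return data;
}

static inline void nbpf_chan_write(struct nbpf_channel *chan,
				   unsigned int offset, u32 data)
{
	iowrite32(data, chan->base + offset);
	dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, chan->base, offset, data);
}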
339 static void nbpf_chan_halt(struct nbpf_channel *chan) in nbpf_chan_halt() argument
341 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN); in nbpf_chan_halt()
344 static bool nbpf_status_get(struct nbpf_channel *chan) in nbpf_status_get() argument
346 u32 status = nbpf_read(chan->nbpf, NBPF_DSTAT_END); in nbpf_status_get()
348 return status & BIT(chan - chan->nbpf->chan); in nbpf_status_get()
351 static void nbpf_status_ack(struct nbpf_channel *chan) in nbpf_status_ack() argument
353 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREND); in nbpf_status_ack()
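
Note the bit selection in nbpf_status_get(): chan - chan->nbpf->chan is pointer arithmetic over the chan[] flexible array member (line 237), so it yields the channel's index, and BIT() picks that channel's flag out of the controller-wide NBPF_DSTAT_END register. The acknowledge then goes through the channel's own CTRL register. Reconstructed from the matched lines, with the function bodies completed by assumption:

static bool nbpf_status_get(struct nbpf_channel *chan)
{
	/* One bit per channel in the shared "transfer end" status register */
	u32 status = nbpf_read(chan->nbpf, NBPF_DSTAT_END);

	return status & BIT(chan - chan->nbpf->chan);
}

static void nbpf_status_ack(struct nbpf_channel *chan)
{
	/* Clear this channel's END flag via its per-channel CTRL register */
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREND);
}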
363 return nbpf->chan + __ffs(error); in nbpf_error_get_channel()
366 static void nbpf_error_clear(struct nbpf_channel *chan) in nbpf_error_clear() argument
372 nbpf_chan_halt(chan); in nbpf_error_clear()
375 status = nbpf_chan_read(chan, NBPF_CHAN_STAT); in nbpf_error_clear()
382 dev_err(chan->dma_chan.device->dev, in nbpf_error_clear()
385 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SWRST); in nbpf_error_clear()
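
The error path halts the channel, waits for the transfer to stop, and finally soft-resets the channel. The matched lines show the halt, the NBPF_CHAN_STAT read, the dev_err() and the SWRST write; the polling loop below is a hedged sketch of how they plausibly fit together, and the NBPF_CHAN_STAT_TACT test and retry bound are assumptions:

static void nbpf_error_clear(struct nbpf_channel *chan)
{
	u32 status;
	int retries = 0;

	/* Stop the channel and make sure DMA has actually been aborted */
	nbpf_chan_halt(chan);

	do {
		status = nbpf_chan_read(chan, NBPF_CHAN_STAT);
		if (!(status & NBPF_CHAN_STAT_TACT))
			break;
		cpu_relax();
	} while (++retries < 10);

	if (status & NBPF_CHAN_STAT_TACT)
		dev_err(chan->dma_chan.device->dev,
			"%s(): abort timeout, channel status 0x%x\n",
			__func__, status);

	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SWRST);
}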
390 struct nbpf_channel *chan = desc->chan; in nbpf_start() local
393 nbpf_chan_write(chan, NBPF_CHAN_NXLA, (u32)ldesc->hwdesc_dma_addr); in nbpf_start()
394 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETEN | NBPF_CHAN_CTRL_CLRSUS); in nbpf_start()
395 chan->paused = false; in nbpf_start()
399 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_STG); in nbpf_start()
401 dev_dbg(chan->nbpf->dma_dev.dev, "%s(): next 0x%x, cur 0x%x\n", __func__, in nbpf_start()
402 nbpf_chan_read(chan, NBPF_CHAN_NXLA), nbpf_chan_read(chan, NBPF_CHAN_CRLA)); in nbpf_start()
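
Starting a transfer programs the DMA address of the first hardware link descriptor into NBPF_CHAN_NXLA, enables the channel while clearing suspend, and, for block-mode (memcpy) transfers, issues a software trigger. A sketch assembled around the matched lines; the ldesc lookup, the NBPF_CHAN_CFG_TM test and the int return convention are assumptions:

static int nbpf_start(struct nbpf_desc *desc)
{
	struct nbpf_channel *chan = desc->chan;
	struct nbpf_link_desc *ldesc = list_first_entry(&desc->sg,
					struct nbpf_link_desc, node);

	/* Point the channel at the first hardware descriptor, then enable */
	nbpf_chan_write(chan, NBPF_CHAN_NXLA, (u32)ldesc->hwdesc_dma_addr);
	nbpf_chan_write(chan, NBPF_CHAN_CTRL,
			NBPF_CHAN_CTRL_SETEN | NBPF_CHAN_CTRL_CLRSUS);
	chan->paused = false;

	/* Block-mode (memcpy) transfers need a software trigger */
	if (ldesc->hwdesc->config & NBPF_CHAN_CFG_TM)
		nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_STG);

	dev_dbg(chan->nbpf->dma_dev.dev, "%s(): next 0x%x, cur 0x%x\n", __func__,
		nbpf_chan_read(chan, NBPF_CHAN_NXLA),
		nbpf_chan_read(chan, NBPF_CHAN_CRLA));

	return 0;
}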
407 static void nbpf_chan_prepare(struct nbpf_channel *chan) in nbpf_chan_prepare() argument
409 chan->dmarq_cfg = (chan->flags & NBPF_SLAVE_RQ_HIGH ? NBPF_CHAN_CFG_HIEN : 0) | in nbpf_chan_prepare()
410 (chan->flags & NBPF_SLAVE_RQ_LOW ? NBPF_CHAN_CFG_LOEN : 0) | in nbpf_chan_prepare()
411 (chan->flags & NBPF_SLAVE_RQ_LEVEL ? in nbpf_chan_prepare()
413 chan->terminal; in nbpf_chan_prepare()
416 static void nbpf_chan_prepare_default(struct nbpf_channel *chan) in nbpf_chan_prepare_default() argument
419 chan->dmarq_cfg = NBPF_CHAN_CFG_AM & 0x400; in nbpf_chan_prepare_default()
420 chan->terminal = 0; in nbpf_chan_prepare_default()
421 chan->flags = 0; in nbpf_chan_prepare_default()
424 static void nbpf_chan_configure(struct nbpf_channel *chan) in nbpf_chan_configure() argument
431 nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg); in nbpf_chan_configure()
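
nbpf_chan_prepare() folds the DT-provided request flags and terminal number into a cached DMARQ configuration word (the level/edge term at source line 412 is elided from this listing), while nbpf_chan_prepare_default() and nbpf_chan_configure() are shown nearly in full. A sketch of the latter two from the matched lines:

static void nbpf_chan_prepare_default(struct nbpf_channel *chan)
{
	/* Default (memcpy) setup: fixed address-mode bits, no terminal */
	chan->dmarq_cfg = NBPF_CHAN_CFG_AM & 0x400;
	chan->terminal = 0;
	chan->flags = 0;
}

static void nbpf_chan_configure(struct nbpf_channel *chan)
{
	/*
	 * The cached DMARQ word is only written out here; DMS selects
	 * register-mode (link descriptor) operation.
	 */
	nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg);
}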
512 struct nbpf_channel *chan = desc->chan; in nbpf_prep_one() local
513 struct device *dev = chan->dma_chan.device->dev; in nbpf_prep_one()
540 mem_xfer = nbpf_xfer_ds(chan->nbpf, size, direction); in nbpf_prep_one()
544 can_burst = chan->slave_src_width >= 3; in nbpf_prep_one()
546 chan->slave_src_burst : chan->slave_src_width); in nbpf_prep_one()
551 if (mem_xfer > chan->slave_src_burst && !can_burst) in nbpf_prep_one()
552 mem_xfer = chan->slave_src_burst; in nbpf_prep_one()
560 slave_xfer = min(mem_xfer, chan->slave_dst_width >= 3 ? in nbpf_prep_one()
561 chan->slave_dst_burst : chan->slave_dst_width); in nbpf_prep_one()
576 hwdesc->config |= chan->dmarq_cfg | (last ? 0 : NBPF_CHAN_CFG_DEM) | in nbpf_prep_one()
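
In nbpf_prep_one() the per-chunk transfer sizes are negotiated: the memory side starts from the largest size the controller supports for this buffer (nbpf_xfer_ds()), the device side is clamped to the configured slave width, or to the slave burst when the port is wide enough (the driver tests slave_src_width >= 3), and a narrow port additionally caps the memory side at the configured burst. A condensed, hedged sketch of the DMA_DEV_TO_MEM branch, using size and direction from the enclosing function:

	case DMA_DEV_TO_MEM:
		mem_xfer = nbpf_xfer_ds(chan->nbpf, size, direction);

		/* Only sufficiently wide slave ports may burst */
		can_burst = chan->slave_src_width >= 3;
		slave_xfer = min(mem_xfer, can_burst ?
				 chan->slave_src_burst : chan->slave_src_width);
		/*
		 * With a narrow port, also cap the memory side at the
		 * configured burst (assumed rationale).
		 */
		if (mem_xfer > chan->slave_src_burst && !can_burst)
			mem_xfer = chan->slave_src_burst;
		break;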
589 static size_t nbpf_bytes_left(struct nbpf_channel *chan) in nbpf_bytes_left() argument
591 return nbpf_chan_read(chan, NBPF_CHAN_CUR_TR_BYTE); in nbpf_bytes_left()
604 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_issue_pending() local
609 spin_lock_irqsave(&chan->lock, flags); in nbpf_issue_pending()
610 if (list_empty(&chan->queued)) in nbpf_issue_pending()
613 list_splice_tail_init(&chan->queued, &chan->active); in nbpf_issue_pending()
615 if (!chan->running) { in nbpf_issue_pending()
616 struct nbpf_desc *desc = list_first_entry(&chan->active, in nbpf_issue_pending()
619 chan->running = desc; in nbpf_issue_pending()
623 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_issue_pending()
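
issue_pending() moves everything queued onto the active list under the channel lock and, if the channel is idle, kicks off the first active descriptor. A reconstruction; the nbpf_start() success check and the unlock label are assumptions based on the surrounding matches:

static void nbpf_issue_pending(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (list_empty(&chan->queued))
		goto unlock;

	/* Promote all queued descriptors to the active list */
	list_splice_tail_init(&chan->queued, &chan->active);

	if (!chan->running) {
		struct nbpf_desc *desc = list_first_entry(&chan->active,
						struct nbpf_desc, node);
		if (!nbpf_start(desc))
			chan->running = desc;
	}

unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
}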
629 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_tx_status() local
636 spin_lock_irqsave(&chan->lock, flags); in nbpf_tx_status()
637 running = chan->running ? chan->running->async_tx.cookie : -EINVAL; in nbpf_tx_status()
640 state->residue = nbpf_bytes_left(chan); in nbpf_tx_status()
647 list_for_each_entry(desc, &chan->active, node) in nbpf_tx_status()
654 list_for_each_entry(desc, &chan->queued, node) in nbpf_tx_status()
664 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_tx_status()
667 if (chan->paused) in nbpf_tx_status()
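
Residue reporting in nbpf_tx_status() distinguishes three cases: for the descriptor currently running, the remaining byte count is read straight from hardware via nbpf_bytes_left(); for a descriptor still sitting on the active or queued lists nothing has been transferred yet, so the full length is reported; otherwise the residue is zero. The final chan->paused check (line 667) downgrades the returned status to DMA_PAUSED. A hedged sketch of the locked middle section; desc->len and the found flag are assumptions:

	spin_lock_irqsave(&chan->lock, flags);
	running = chan->running ? chan->running->async_tx.cookie : -EINVAL;

	if (cookie == running) {
		/* In flight: ask the hardware how much is left */
		state->residue = nbpf_bytes_left(chan);
	} else {
		/* Not started yet: the whole descriptor remains */
		struct nbpf_desc *desc;
		bool found = false;

		list_for_each_entry(desc, &chan->active, node)
			if (desc->async_tx.cookie == cookie) {
				found = true;
				break;
			}

		if (!found)
			list_for_each_entry(desc, &chan->queued, node)
				if (desc->async_tx.cookie == cookie) {
					found = true;
					break;
				}

		state->residue = found ? desc->len : 0;
	}

	spin_unlock_irqrestore(&chan->lock, flags);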
676 struct nbpf_channel *chan = desc->chan; in nbpf_tx_submit() local
680 spin_lock_irqsave(&chan->lock, flags); in nbpf_tx_submit()
682 list_add_tail(&desc->node, &chan->queued); in nbpf_tx_submit()
683 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_tx_submit()
685 dev_dbg(chan->dma_chan.device->dev, "Entry %s(%d)\n", __func__, cookie); in nbpf_tx_submit()
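
Submission is minimal: assign a cookie and append the descriptor to the queued list, both under the channel lock. Reconstructed, with the container_of() and dma_cookie_assign() steps assumed:

static dma_cookie_t nbpf_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct nbpf_desc *desc = container_of(tx, struct nbpf_desc, async_tx);
	struct nbpf_channel *chan = desc->chan;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->lock, flags);
	cookie = dma_cookie_assign(tx);
	list_add_tail(&desc->node, &chan->queued);
	spin_unlock_irqrestore(&chan->lock, flags);

	dev_dbg(chan->dma_chan.device->dev, "Entry %s(%d)\n", __func__, cookie);

	return cookie;
}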
690 static int nbpf_desc_page_alloc(struct nbpf_channel *chan) in nbpf_desc_page_alloc() argument
692 struct dma_chan *dchan = &chan->dma_chan; in nbpf_desc_page_alloc()
725 desc->chan = chan; in nbpf_desc_page_alloc()
734 spin_lock_irq(&chan->lock); in nbpf_desc_page_alloc()
735 list_splice_tail(&lhead, &chan->free_links); in nbpf_desc_page_alloc()
736 list_splice_tail(&head, &chan->free); in nbpf_desc_page_alloc()
737 list_add(&dpage->node, &chan->desc_page); in nbpf_desc_page_alloc()
738 spin_unlock_irq(&chan->lock); in nbpf_desc_page_alloc()
745 struct nbpf_channel *chan = desc->chan; in nbpf_desc_put() local
749 spin_lock_irqsave(&chan->lock, flags); in nbpf_desc_put()
751 list_move(&ldesc->node, &chan->free_links); in nbpf_desc_put()
753 list_add(&desc->node, &chan->free); in nbpf_desc_put()
754 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_desc_put()
757 static void nbpf_scan_acked(struct nbpf_channel *chan) in nbpf_scan_acked() argument
763 spin_lock_irqsave(&chan->lock, flags); in nbpf_scan_acked()
764 list_for_each_entry_safe(desc, tmp, &chan->done, node) in nbpf_scan_acked()
769 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_scan_acked()
783 static struct nbpf_desc *nbpf_desc_get(struct nbpf_channel *chan, size_t len) in nbpf_desc_get() argument
788 nbpf_scan_acked(chan); in nbpf_desc_get()
790 spin_lock_irq(&chan->lock); in nbpf_desc_get()
795 if (list_empty(&chan->free)) { in nbpf_desc_get()
797 spin_unlock_irq(&chan->lock); in nbpf_desc_get()
798 ret = nbpf_desc_page_alloc(chan); in nbpf_desc_get()
801 spin_lock_irq(&chan->lock); in nbpf_desc_get()
804 desc = list_first_entry(&chan->free, struct nbpf_desc, node); in nbpf_desc_get()
808 if (list_empty(&chan->free_links)) { in nbpf_desc_get()
810 spin_unlock_irq(&chan->lock); in nbpf_desc_get()
811 ret = nbpf_desc_page_alloc(chan); in nbpf_desc_get()
816 spin_lock_irq(&chan->lock); in nbpf_desc_get()
820 ldesc = list_first_entry(&chan->free_links, in nbpf_desc_get()
835 spin_unlock_irq(&chan->lock); in nbpf_desc_get()
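
nbpf_desc_get() shows a classic lock-drop-and-retry allocation pattern: the free lists are inspected under the spinlock, but when one runs empty the lock is released, nbpf_desc_page_alloc() is called (it may sleep, and itself takes the lock to splice in the new page, lines 734-738), and the lock is re-taken before retrying. A condensed sketch of that shape; the loop structure and error handling are assumptions:

	nbpf_scan_acked(chan);

	spin_lock_irq(&chan->lock);

	for (;;) {
		if (list_empty(&chan->free)) {
			/* Allocation can sleep: drop the lock first */
			spin_unlock_irq(&chan->lock);
			if (nbpf_desc_page_alloc(chan) < 0)
				return NULL;
			spin_lock_irq(&chan->lock);
			continue;
		}
		desc = list_first_entry(&chan->free, struct nbpf_desc, node);
		break;
	}

	/* Link descriptors for 'len' bytes come from chan->free_links,
	 * refilled the same way (lines 808-820). */

	spin_unlock_irq(&chan->lock);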
840 static void nbpf_chan_idle(struct nbpf_channel *chan) in nbpf_chan_idle() argument
846 spin_lock_irqsave(&chan->lock, flags); in nbpf_chan_idle()
848 list_splice_init(&chan->done, &head); in nbpf_chan_idle()
849 list_splice_init(&chan->active, &head); in nbpf_chan_idle()
850 list_splice_init(&chan->queued, &head); in nbpf_chan_idle()
852 chan->running = NULL; in nbpf_chan_idle()
854 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_chan_idle()
857 dev_dbg(chan->nbpf->dma_dev.dev, "%s(): force-free desc %p cookie %d\n", in nbpf_chan_idle()
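
nbpf_chan_idle() splices all three descriptor lists onto a private head under the lock and clears chan->running, then force-frees the descriptors outside the lock, where nbpf_desc_put() may take the lock again. Reconstructed; the LIST_HEAD and the final loop are assumptions:

static void nbpf_chan_idle(struct nbpf_channel *chan)
{
	struct nbpf_desc *desc, *tmp;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->lock, flags);

	list_splice_init(&chan->done, &head);
	list_splice_init(&chan->active, &head);
	list_splice_init(&chan->queued, &head);

	chan->running = NULL;

	spin_unlock_irqrestore(&chan->lock, flags);

	/* Free outside the lock: nbpf_desc_put() takes it itself */
	list_for_each_entry_safe(desc, tmp, &head, node) {
		dev_dbg(chan->nbpf->dma_dev.dev,
			"%s(): force-free desc %p cookie %d\n",
			__func__, desc, desc->async_tx.cookie);
		nbpf_desc_put(desc);
	}
}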
866 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_pause() local
870 chan->paused = true; in nbpf_pause()
871 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS); in nbpf_pause()
873 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN); in nbpf_pause()
880 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_terminate_all() local
885 nbpf_chan_halt(chan); in nbpf_terminate_all()
886 nbpf_chan_idle(chan); in nbpf_terminate_all()
894 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_config() local
904 chan->slave_dst_addr = config->dst_addr; in nbpf_config()
905 chan->slave_dst_width = nbpf_xfer_size(chan->nbpf, in nbpf_config()
907 chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf, in nbpf_config()
910 chan->slave_src_addr = config->src_addr; in nbpf_config()
911 chan->slave_src_width = nbpf_xfer_size(chan->nbpf, in nbpf_config()
913 chan->slave_src_burst = nbpf_xfer_size(chan->nbpf, in nbpf_config()
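
nbpf_config() never touches the hardware: it caches both directions' FIFO address, width and burst, pre-converted by nbpf_xfer_size() into the controller's encoding, so that prep-time code can use them directly. Reconstructed with the nbpf_xfer_size() argument lists assumed:

static int nbpf_config(struct dma_chan *dchan,
		       struct dma_slave_config *config)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);

	chan->slave_dst_addr = config->dst_addr;
	chan->slave_dst_width = nbpf_xfer_size(chan->nbpf,
					       config->dst_addr_width, 1);
	chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
					       config->dst_addr_width,
					       config->dst_maxburst);
	chan->slave_src_addr = config->src_addr;
	chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
					       config->src_addr_width, 1);
	chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
					       config->src_addr_width,
					       config->src_maxburst);

	return 0;
}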
920 static struct dma_async_tx_descriptor *nbpf_prep_sg(struct nbpf_channel *chan, in nbpf_prep_sg() argument
952 desc = nbpf_desc_get(chan, len); in nbpf_prep_sg()
993 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_prep_memcpy() local
1009 return nbpf_prep_sg(chan, &src_sg, &dst_sg, 1, in nbpf_prep_memcpy()
1017 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_prep_slave_sg() local
1026 sg_dma_address(&slave_sg) = chan->slave_dst_addr; in nbpf_prep_slave_sg()
1027 return nbpf_prep_sg(chan, sgl, &slave_sg, sg_len, in nbpf_prep_slave_sg()
1031 sg_dma_address(&slave_sg) = chan->slave_src_addr; in nbpf_prep_slave_sg()
1032 return nbpf_prep_sg(chan, &slave_sg, sgl, sg_len, in nbpf_prep_slave_sg()
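
For slave transfers the device side is represented by a synthetic single-entry scatterlist whose DMA address is the FIFO address cached by nbpf_config(), so the memcpy and slave paths funnel into the same nbpf_prep_sg(). Reconstructed with the sg_init_table() call and the parameter list assumed:

static struct dma_async_tx_descriptor *nbpf_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	struct scatterlist slave_sg;

	sg_init_table(&slave_sg, 1);

	switch (direction) {
	case DMA_MEM_TO_DEV:
		sg_dma_address(&slave_sg) = chan->slave_dst_addr;
		return nbpf_prep_sg(chan, sgl, &slave_sg, sg_len,
				    direction, flags);

	case DMA_DEV_TO_MEM:
		sg_dma_address(&slave_sg) = chan->slave_src_addr;
		return nbpf_prep_sg(chan, &slave_sg, sgl, sg_len,
				    direction, flags);

	default:
		return NULL;
	}
}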
1042 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_alloc_chan_resources() local
1045 INIT_LIST_HEAD(&chan->free); in nbpf_alloc_chan_resources()
1046 INIT_LIST_HEAD(&chan->free_links); in nbpf_alloc_chan_resources()
1047 INIT_LIST_HEAD(&chan->queued); in nbpf_alloc_chan_resources()
1048 INIT_LIST_HEAD(&chan->active); in nbpf_alloc_chan_resources()
1049 INIT_LIST_HEAD(&chan->done); in nbpf_alloc_chan_resources()
1051 ret = nbpf_desc_page_alloc(chan); in nbpf_alloc_chan_resources()
1056 chan->terminal); in nbpf_alloc_chan_resources()
1058 nbpf_chan_configure(chan); in nbpf_alloc_chan_resources()
1065 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_free_chan_resources() local
1070 nbpf_chan_halt(chan); in nbpf_free_chan_resources()
1071 nbpf_chan_idle(chan); in nbpf_free_chan_resources()
1073 nbpf_chan_prepare_default(chan); in nbpf_free_chan_resources()
1075 list_for_each_entry_safe(dpage, tmp, &chan->desc_page, node) { in nbpf_free_chan_resources()
1093 struct nbpf_channel *chan; in nbpf_of_xlate() local
1105 chan = nbpf_to_chan(dchan); in nbpf_of_xlate()
1107 chan->terminal = dma_spec->args[0]; in nbpf_of_xlate()
1108 chan->flags = dma_spec->args[1]; in nbpf_of_xlate()
1110 nbpf_chan_prepare(chan); in nbpf_of_xlate()
1111 nbpf_chan_configure(chan); in nbpf_of_xlate()
1118 struct nbpf_channel *chan = from_tasklet(chan, t, tasklet); in nbpf_chan_tasklet() local
1122 while (!list_empty(&chan->done)) { in nbpf_chan_tasklet()
1125 spin_lock_irq(&chan->lock); in nbpf_chan_tasklet()
1127 list_for_each_entry_safe(desc, tmp, &chan->done, node) { in nbpf_chan_tasklet()
1138 spin_unlock_irq(&chan->lock); in nbpf_chan_tasklet()
1150 spin_unlock_irq(&chan->lock); in nbpf_chan_tasklet()
1171 spin_unlock_irq(&chan->lock); in nbpf_chan_tasklet()
1182 struct nbpf_channel *chan = dev; in nbpf_chan_irq() local
1183 bool done = nbpf_status_get(chan); in nbpf_chan_irq()
1191 nbpf_status_ack(chan); in nbpf_chan_irq()
1193 dev_dbg(&chan->dma_chan.dev->device, "%s()\n", __func__); in nbpf_chan_irq()
1195 spin_lock(&chan->lock); in nbpf_chan_irq()
1196 desc = chan->running; in nbpf_chan_irq()
1205 list_move_tail(&desc->node, &chan->done); in nbpf_chan_irq()
1206 chan->running = NULL; in nbpf_chan_irq()
1208 if (!list_empty(&chan->active)) { in nbpf_chan_irq()
1209 desc = list_first_entry(&chan->active, in nbpf_chan_irq()
1212 chan->running = desc; in nbpf_chan_irq()
1216 spin_unlock(&chan->lock); in nbpf_chan_irq()
1219 tasklet_schedule(&chan->tasklet); in nbpf_chan_irq()
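
The channel interrupt handler checks and acknowledges the per-channel END status, retires the running descriptor onto the done list, immediately chains the next active descriptor while still holding the lock, and defers the completion callbacks to the tasklet. Reconstructed; the WARN_ON and the irqreturn_t bookkeeping are assumptions:

static irqreturn_t nbpf_chan_irq(int irq, void *dev)
{
	struct nbpf_channel *chan = dev;
	bool done = nbpf_status_get(chan);
	struct nbpf_desc *desc;
	irqreturn_t ret;
	bool bh = false;

	if (!done)
		return IRQ_NONE;

	nbpf_status_ack(chan);

	dev_dbg(&chan->dma_chan.dev->device, "%s()\n", __func__);

	spin_lock(&chan->lock);
	desc = chan->running;
	if (WARN_ON(!desc)) {
		ret = IRQ_NONE;
		goto unlock;
	}
	ret = IRQ_HANDLED;
	bh = true;

	/* Retire the finished descriptor and start the next one, if any */
	list_move_tail(&desc->node, &chan->done);
	chan->running = NULL;

	if (!list_empty(&chan->active)) {
		desc = list_first_entry(&chan->active,
					struct nbpf_desc, node);
		if (!nbpf_start(desc))
			chan->running = desc;
	}

unlock:
	spin_unlock(&chan->lock);

	if (bh)
		tasklet_schedule(&chan->tasklet);

	return ret;
}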
1235 struct nbpf_channel *chan = nbpf_error_get_channel(nbpf, error); in nbpf_err_irq() local
1237 nbpf_error_clear(chan); in nbpf_err_irq()
1238 nbpf_chan_idle(chan); in nbpf_err_irq()
1248 struct nbpf_channel *chan = nbpf->chan + n; in nbpf_chan_probe() local
1251 chan->nbpf = nbpf; in nbpf_chan_probe()
1252 chan->base = nbpf->base + NBPF_REG_CHAN_OFFSET + NBPF_REG_CHAN_SIZE * n; in nbpf_chan_probe()
1253 INIT_LIST_HEAD(&chan->desc_page); in nbpf_chan_probe()
1254 spin_lock_init(&chan->lock); in nbpf_chan_probe()
1255 chan->dma_chan.device = dma_dev; in nbpf_chan_probe()
1256 dma_cookie_init(&chan->dma_chan); in nbpf_chan_probe()
1257 nbpf_chan_prepare_default(chan); in nbpf_chan_probe()
1259 dev_dbg(dma_dev->dev, "%s(): channel %d: -> %p\n", __func__, n, chan->base); in nbpf_chan_probe()
1261 snprintf(chan->name, sizeof(chan->name), "nbpf %d", n); in nbpf_chan_probe()
1263 tasklet_setup(&chan->tasklet, nbpf_chan_tasklet); in nbpf_chan_probe()
1264 ret = devm_request_irq(dma_dev->dev, chan->irq, in nbpf_chan_probe()
1266 chan->name, chan); in nbpf_chan_probe()
1271 list_add_tail(&chan->dma_chan.device_node, in nbpf_chan_probe()
1313 nbpf = devm_kzalloc(dev, struct_size(nbpf, chan, num_channels), in nbpf_probe()
1360 nbpf->chan[i].irq = irqbuf[0]; in nbpf_probe()
1367 struct nbpf_channel *chan; in nbpf_probe() local
1369 for (i = 0, chan = nbpf->chan; i <= num_channels; in nbpf_probe()
1370 i++, chan++) { in nbpf_probe()
1374 chan->irq = irqbuf[i]; in nbpf_probe()
1377 if (chan != nbpf->chan + num_channels) in nbpf_probe()
1387 nbpf->chan[i].irq = irq; in nbpf_probe()
1470 struct nbpf_channel *chan = nbpf->chan + i; in nbpf_remove() local
1472 devm_free_irq(&pdev->dev, chan->irq, chan); in nbpf_remove()
1474 tasklet_kill(&chan->tasklet); in nbpf_remove()
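
Note the teardown order in nbpf_remove(): the per-channel IRQ is freed explicitly before tasklet_kill(), so the handler can no longer re-schedule the tasklet once it has been killed. A sketch of the loop, with the channel count taken from the controller config by assumption:

	for (i = 0; i < nbpf->config->num_channels; i++) {
		struct nbpf_channel *chan = nbpf->chan + i;

		/* No IRQ means no new tasklet_schedule() after this point */
		devm_free_irq(&pdev->dev, chan->irq, chan);

		tasklet_kill(&chan->tasklet);
	}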