
Lines Matching +full:tx +full:- +full:m

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Applied Micro X-Gene SoC DMA engine Driver
15 #include <linux/dma-mapping.h>
26 /* X-Gene DMA ring csr registers and bit definitions */
42 #define XGENE_DMA_RING_NE_INT_MODE_SET(m, v) \
43 ((m) = ((m) & ~BIT(31 - (v))) | BIT(31 - (v)))
44 #define XGENE_DMA_RING_NE_INT_MODE_RESET(m, v) \
45 ((m) &= (~BIT(31 - (v))))
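The two macros above set and clear a per-ring interrupt-enable bit keyed by the buffer number, counting down from the MSB. A minimal stand-alone sketch of their effect, assuming BIT(n) expands to (1U << (n)) as in <linux/bits.h>:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))
#define NE_INT_MODE_SET(m, v)   ((m) = ((m) & ~BIT(31 - (v))) | BIT(31 - (v)))
#define NE_INT_MODE_RESET(m, v) ((m) &= ~BIT(31 - (v)))

int main(void)
{
	uint32_t mode = 0;

	NE_INT_MODE_SET(mode, 2);	/* buffer 2 -> bit 29 */
	printf("0x%08x\n", mode);	/* prints 0x20000000 */
	NE_INT_MODE_RESET(mode, 2);	/* clear it again */
	printf("0x%08x\n", mode);	/* prints 0x00000000 */
	return 0;
}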
55 #define XGENE_DMA_RING_COHERENT_SET(m) \
56 (((u32 *)(m))[2] |= BIT(4))
57 #define XGENE_DMA_RING_ADDRL_SET(m, v) \
58 (((u32 *)(m))[2] |= (((v) >> 8) << 5))
59 #define XGENE_DMA_RING_ADDRH_SET(m, v) \
60 (((u32 *)(m))[3] |= ((v) >> 35))
61 #define XGENE_DMA_RING_ACCEPTLERR_SET(m) \
62 (((u32 *)(m))[3] |= BIT(19))
63 #define XGENE_DMA_RING_SIZE_SET(m, v) \
64 (((u32 *)(m))[3] |= ((v) << 23))
65 #define XGENE_DMA_RING_RECOMBBUF_SET(m) \
66 (((u32 *)(m))[3] |= BIT(27))
67 #define XGENE_DMA_RING_RECOMTIMEOUTL_SET(m) \
68 (((u32 *)(m))[3] |= (0x7 << 28))
69 #define XGENE_DMA_RING_RECOMTIMEOUTH_SET(m) \
70 (((u32 *)(m))[4] |= 0x3)
71 #define XGENE_DMA_RING_SELTHRSH_SET(m) \
72 (((u32 *)(m))[4] |= BIT(3))
73 #define XGENE_DMA_RING_TYPE_SET(m, v) \
74 (((u32 *)(m))[4] |= ((v) << 19))
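All of the *_SET helpers above OR fields into words [2]..[4] of a small configuration image that xgene_dma_wr_ring_state() later writes out word by word (see further down in this listing). A hedged user-space sketch of how such an image is composed; the 5-word size mirrors XGENE_DMA_RING_NUM_CONFIG used later on, and the helper name and values here are made up:

#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

#define BIT(n) (1U << (n))
#define RING_COHERENT_SET(m)  (((u32 *)(m))[2] |= BIT(4))
#define RING_ADDRL_SET(m, v)  (((u32 *)(m))[2] |= (u32)(((v) >> 8) << 5))
#define RING_ADDRH_SET(m, v)  (((u32 *)(m))[3] |= (u32)((v) >> 35))
#define RING_SIZE_SET(m, v)   (((u32 *)(m))[3] |= ((v) << 23))

/* Compose a ring configuration image from the descriptor base address
 * and an encoded ring size, roughly as xgene_dma_setup_ring() does. */
void build_ring_cfg(u32 state[5], u64 desc_paddr, u32 cfgsize)
{
	void *ring_cfg = state;

	RING_COHERENT_SET(ring_cfg);		/* IO-coherent ring */
	RING_ADDRL_SET(ring_cfg, desc_paddr);	/* base address, low bits */
	RING_ADDRH_SET(ring_cfg, desc_paddr);	/* base address, high bits */
	RING_SIZE_SET(ring_cfg, cfgsize);	/* encoded ring size */
}

The resulting words are then pushed to the ring CSR block one register at a time, which is what the iowrite32() loop in xgene_dma_wr_ring_state() does.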
76 /* X-Gene DMA device csr registers and bit definitions */
105 /* X-Gene SoC EFUSE csr register and bit definition */
109 /* X-Gene DMA Descriptor format */
119 #define XGENE_DMA_DESC_ELERR_RD(m) \
120 (((m) >> XGENE_DMA_DESC_ELERR_POS) & 0x3)
121 #define XGENE_DMA_DESC_LERR_RD(m) \
122 (((m) >> XGENE_DMA_DESC_LERR_POS) & 0x7)
126 /* X-Gene DMA descriptor empty s/w signature */
129 /* X-Gene DMA configurable parameters defines */
147 /* X-Gene DMA descriptor error codes */
160 /* X-Gene DMA error interrupt codes */
174 /* X-Gene DMA flyby operation code */
180 /* X-Gene DMA SW descriptor flags */
183 /* Macro to dump an X-Gene DMA descriptor */
184 #define XGENE_DMA_DESC_DUMP(desc, m) \
185 print_hex_dump(KERN_ERR, (m), \
188 #define to_dma_desc_sw(tx) \
189 container_of(tx, struct xgene_dma_desc_sw, tx)
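to_dma_desc_sw() above is the standard container_of() idiom: from a pointer to the embedded dma_async_tx_descriptor, recover the driver's wrapping xgene_dma_desc_sw. A stand-alone illustration of the same pattern with generic (hypothetical) type names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tx_desc { int cookie; };

struct sw_desc {
	int flags;
	struct tx_desc tx;	/* embedded member, like desc_sw->tx */
};

int main(void)
{
	struct sw_desc d = { .flags = 1, .tx = { .cookie = 42 } };
	struct tx_desc *tx = &d.tx;
	struct sw_desc *sw = container_of(tx, struct sw_desc, tx);

	printf("%d %d\n", sw->flags, sw->tx.cookie);	/* prints "1 42" */
	return 0;
}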
194 dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
196 dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
241 struct dma_async_tx_descriptor tx;
245 * struct xgene_dma_chan - internal representation of an X-Gene DMA channel
247 * @pdma: X-Gene DMA device structure reference
251 * @name: name of X-Gene DMA channel
261 * are waiting for the ACK bit to be set by the async tx API.
289 * struct xgene_dma - internal representation of an X-Gene DMA device
299 * @chan: reference to X-Gene DMA channels
347 val = ioread32(pdma->csr_efuse + XGENE_SOC_JTAG1_SHADOW); in is_pq_enabled()
380 *len -= nbytes; in xgene_dma_set_src_buffer()
388 return &desc->m1; in xgene_dma_lookup_ext8()
390 return &desc->m0; in xgene_dma_lookup_ext8()
392 return &desc->m3; in xgene_dma_lookup_ext8()
394 return &desc->m2; in xgene_dma_lookup_ext8()
405 desc->m0 |= cpu_to_le64(XGENE_DMA_DESC_IN_BIT); in xgene_dma_init_desc()
406 desc->m0 |= cpu_to_le64((u64)XGENE_DMA_RING_OWNER_DMA << in xgene_dma_init_desc()
408 desc->m1 |= cpu_to_le64(XGENE_DMA_DESC_C_BIT); in xgene_dma_init_desc()
409 desc->m3 |= cpu_to_le64((u64)dst_ring_num << in xgene_dma_init_desc()
423 desc1 = &desc_sw->desc1; in xgene_dma_prep_xor_desc()
424 desc2 = &desc_sw->desc2; in xgene_dma_prep_xor_desc()
427 xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num); in xgene_dma_prep_xor_desc()
430 desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT); in xgene_dma_prep_xor_desc()
431 desc1->m3 |= cpu_to_le64(*dst); in xgene_dma_prep_xor_desc()
434 desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT); in xgene_dma_prep_xor_desc()
437 desc1->m2 |= cpu_to_le64(xgene_dma_encode_xor_flyby(src_cnt)); in xgene_dma_prep_xor_desc()
442 xgene_dma_set_src_buffer((i == 0) ? &desc1->m1 : in xgene_dma_prep_xor_desc()
443 xgene_dma_lookup_ext8(desc2, i - 1), in xgene_dma_prep_xor_desc()
445 desc1->m2 |= cpu_to_le64((scf[i] << ((i + 1) * 8))); in xgene_dma_prep_xor_desc()
453 desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC; in xgene_dma_prep_xor_desc()
456 static dma_cookie_t xgene_dma_tx_submit(struct dma_async_tx_descriptor *tx) in xgene_dma_tx_submit()
462 if (unlikely(!tx)) in xgene_dma_tx_submit()
463 return -EINVAL; in xgene_dma_tx_submit()
465 chan = to_dma_chan(tx->chan); in xgene_dma_tx_submit()
466 desc = to_dma_desc_sw(tx); in xgene_dma_tx_submit()
468 spin_lock_bh(&chan->lock); in xgene_dma_tx_submit()
470 cookie = dma_cookie_assign(tx); in xgene_dma_tx_submit()
473 list_splice_tail_init(&desc->tx_list, &chan->ld_pending); in xgene_dma_tx_submit()
475 spin_unlock_bh(&chan->lock); in xgene_dma_tx_submit()
483 list_del(&desc->node); in xgene_dma_clean_descriptor()
485 dma_pool_free(chan->desc_pool, desc, desc->tx.phys); in xgene_dma_clean_descriptor()
494 desc = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys); in xgene_dma_alloc_descriptor()
500 INIT_LIST_HEAD(&desc->tx_list); in xgene_dma_alloc_descriptor()
501 desc->tx.phys = phys; in xgene_dma_alloc_descriptor()
502 desc->tx.tx_submit = xgene_dma_tx_submit; in xgene_dma_alloc_descriptor()
503 dma_async_tx_descriptor_init(&desc->tx, &chan->dma_chan); in xgene_dma_alloc_descriptor()
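The allocation path above draws software descriptors from a per-channel DMA pool and records the bus address in desc->tx.phys. A hedged sketch of that dma_pool lifecycle (the element size and 64-byte alignment are assumptions; this listing truncates the driver's actual dma_pool_create() arguments):

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/types.h>

struct demo_desc { u64 m0, m1, m2, m3; };	/* stand-in for a hw descriptor */

int demo_desc_pool(struct device *dev)
{
	struct dma_pool *pool;
	struct demo_desc *desc;
	dma_addr_t phys;

	/* one pool per channel; alignment value is a placeholder */
	pool = dma_pool_create("demo-pool", dev, sizeof(*desc), 64, 0);
	if (!pool)
		return -ENOMEM;

	/* allocated at prep time, atomic context, hence GFP_NOWAIT */
	desc = dma_pool_zalloc(pool, GFP_NOWAIT, &phys);
	if (desc)
		dma_pool_free(pool, desc, phys);	/* freed after completion */

	dma_pool_destroy(pool);				/* channel teardown */
	return 0;
}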
511 * xgene_dma_clean_completed_descriptor - free all descriptors which
513 * @chan: X-Gene DMA channel
522 list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node) { in xgene_dma_clean_completed_descriptor()
523 if (async_tx_test_ack(&desc->tx)) in xgene_dma_clean_completed_descriptor()
529 * xgene_dma_run_tx_complete_actions - cleanup a single link descriptor
530 * @chan: X-Gene DMA channel
539 struct dma_async_tx_descriptor *tx = &desc->tx; in xgene_dma_run_tx_complete_actions()
548 if (tx->cookie == 0) in xgene_dma_run_tx_complete_actions()
551 dma_cookie_complete(tx); in xgene_dma_run_tx_complete_actions()
552 dma_descriptor_unmap(tx); in xgene_dma_run_tx_complete_actions()
555 dmaengine_desc_get_callback_invoke(tx, NULL); in xgene_dma_run_tx_complete_actions()
558 dma_run_dependencies(tx); in xgene_dma_run_tx_complete_actions()
562 * xgene_dma_clean_running_descriptor - move the completed descriptor from
564 * @chan: X-Gene DMA channel
574 list_del(&desc->node); in xgene_dma_clean_running_descriptor()
580 if (!async_tx_test_ack(&desc->tx)) { in xgene_dma_clean_running_descriptor()
585 list_add_tail(&desc->node, &chan->ld_completed); in xgene_dma_clean_running_descriptor()
590 dma_pool_free(chan->desc_pool, desc, desc->tx.phys); in xgene_dma_clean_running_descriptor()
596 struct xgene_dma_ring *ring = &chan->tx_ring; in xgene_chan_xfer_request()
599 /* Get hw descriptor from DMA tx ring */ in xgene_chan_xfer_request()
600 desc_hw = &ring->desc_hw[ring->head]; in xgene_chan_xfer_request()
606 if (++ring->head == ring->slots) in xgene_chan_xfer_request()
607 ring->head = 0; in xgene_chan_xfer_request()
610 memcpy(desc_hw, &desc_sw->desc1, sizeof(*desc_hw)); in xgene_chan_xfer_request()
616 if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) { in xgene_chan_xfer_request()
617 desc_hw = &ring->desc_hw[ring->head]; in xgene_chan_xfer_request()
619 if (++ring->head == ring->slots) in xgene_chan_xfer_request()
620 ring->head = 0; in xgene_chan_xfer_request()
622 memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw)); in xgene_chan_xfer_request()
626 chan->pending += ((desc_sw->flags & in xgene_chan_xfer_request()
630 iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ? in xgene_chan_xfer_request()
631 2 : 1, ring->cmd); in xgene_chan_xfer_request()
635 * xgene_chan_xfer_ld_pending - push any pending transactions to hw
636 * @chan : X-Gene DMA channel
638 * LOCKING: must hold chan->lock
648 if (list_empty(&chan->ld_pending)) { in xgene_chan_xfer_ld_pending()
657 list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_pending, node) { in xgene_chan_xfer_ld_pending()
664 if (chan->pending >= chan->max_outstanding) in xgene_chan_xfer_ld_pending()
673 list_move_tail(&desc_sw->node, &chan->ld_running); in xgene_chan_xfer_ld_pending()
678 * xgene_dma_cleanup_descriptors - cleanup link descriptors which are completed
680 * @chan: X-Gene DMA channel
688 struct xgene_dma_ring *ring = &chan->rx_ring; in xgene_dma_cleanup_descriptors()
696 spin_lock(&chan->lock); in xgene_dma_cleanup_descriptors()
702 list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) { in xgene_dma_cleanup_descriptors()
704 desc_hw = &ring->desc_hw[ring->head]; in xgene_dma_cleanup_descriptors()
707 if (unlikely(le64_to_cpu(desc_hw->m0) == in xgene_dma_cleanup_descriptors()
711 if (++ring->head == ring->slots) in xgene_dma_cleanup_descriptors()
712 ring->head = 0; in xgene_dma_cleanup_descriptors()
717 desc_hw->m0)), in xgene_dma_cleanup_descriptors()
719 desc_hw->m0))); in xgene_dma_cleanup_descriptors()
725 * We have a DMA transaction error here. Dump DMA Tx in xgene_dma_cleanup_descriptors()
727 XGENE_DMA_DESC_DUMP(&desc_sw->desc1, in xgene_dma_cleanup_descriptors()
728 "X-Gene DMA TX DESC1: "); in xgene_dma_cleanup_descriptors()
730 if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) in xgene_dma_cleanup_descriptors()
731 XGENE_DMA_DESC_DUMP(&desc_sw->desc2, in xgene_dma_cleanup_descriptors()
732 "X-Gene DMA TX DESC2: "); in xgene_dma_cleanup_descriptors()
735 "X-Gene DMA RX ERR DESC: "); in xgene_dma_cleanup_descriptors()
739 iowrite32(-1, ring->cmd); in xgene_dma_cleanup_descriptors()
742 desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE); in xgene_dma_cleanup_descriptors()
748 chan->pending -= ((desc_sw->flags & in xgene_dma_cleanup_descriptors()
755 list_move_tail(&desc_sw->node, &ld_completed); in xgene_dma_cleanup_descriptors()
765 spin_unlock(&chan->lock); in xgene_dma_cleanup_descriptors()
779 if (chan->desc_pool) in xgene_dma_alloc_chan_resources()
782 chan->desc_pool = dma_pool_create(chan->name, chan->dev, in xgene_dma_alloc_chan_resources()
785 if (!chan->desc_pool) { in xgene_dma_alloc_chan_resources()
787 return -ENOMEM; in xgene_dma_alloc_chan_resources()
796 * xgene_dma_free_desc_list - Free all descriptors in a queue
797 * @chan: X-Gene DMA channel
800 * LOCKING: must hold chan->lock
817 if (!chan->desc_pool) in xgene_dma_free_chan_resources()
823 spin_lock_bh(&chan->lock); in xgene_dma_free_chan_resources()
826 xgene_dma_free_desc_list(chan, &chan->ld_pending); in xgene_dma_free_chan_resources()
827 xgene_dma_free_desc_list(chan, &chan->ld_running); in xgene_dma_free_chan_resources()
828 xgene_dma_free_desc_list(chan, &chan->ld_completed); in xgene_dma_free_chan_resources()
830 spin_unlock_bh(&chan->lock); in xgene_dma_free_chan_resources()
833 dma_pool_destroy(chan->desc_pool); in xgene_dma_free_chan_resources()
834 chan->desc_pool = NULL; in xgene_dma_free_chan_resources()
864 new->tx.cookie = 0; in xgene_dma_prep_xor()
865 async_tx_ack(&new->tx); in xgene_dma_prep_xor()
868 list_add_tail(&new->node, &first->tx_list); in xgene_dma_prep_xor()
871 new->tx.flags = flags; /* client is in control of this ack */ in xgene_dma_prep_xor()
872 new->tx.cookie = -EBUSY; in xgene_dma_prep_xor()
873 list_splice(&first->tx_list, &new->tx_list); in xgene_dma_prep_xor()
875 return &new->tx; in xgene_dma_prep_xor()
881 xgene_dma_free_desc_list(chan, &first->tx_list); in xgene_dma_prep_xor()
922 new->tx.cookie = 0; in xgene_dma_prep_pq()
923 async_tx_ack(&new->tx); in xgene_dma_prep_pq()
926 list_add_tail(&new->node, &first->tx_list); in xgene_dma_prep_pq()
948 new->tx.flags = flags; /* client is in control of this ack */ in xgene_dma_prep_pq()
949 new->tx.cookie = -EBUSY; in xgene_dma_prep_pq()
950 list_splice(&first->tx_list, &new->tx_list); in xgene_dma_prep_pq()
952 return &new->tx; in xgene_dma_prep_pq()
958 xgene_dma_free_desc_list(chan, &first->tx_list); in xgene_dma_prep_pq()
966 spin_lock_bh(&chan->lock); in xgene_dma_issue_pending()
968 spin_unlock_bh(&chan->lock); in xgene_dma_issue_pending()
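For context, this is how a dmaengine client would typically exercise the callbacks wired up in this file: prepare a descriptor, submit it (which ends up in xgene_dma_tx_submit()), then kick the channel with issue_pending. A hedged sketch using the generic dmaengine API only; buffer mapping, completion waiting and error paths are elided, and the function name is hypothetical:

#include <linux/dmaengine.h>
#include <linux/err.h>

int demo_xor_one_block(dma_addr_t dst, dma_addr_t *srcs, int src_cnt,
		       size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_XOR, mask);

	chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	tx = chan->device->device_prep_dma_xor(chan, dst, srcs, src_cnt,
						len, DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(tx);		/* -> xgene_dma_tx_submit() */
	dma_async_issue_pending(chan);		/* -> xgene_dma_issue_pending() */

	/* ... wait for the descriptor callback or poll the cookie ... */

	dma_release_channel(chan);
	return dma_submit_error(cookie) ? -EIO : 0;
}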
985 /* Re-enable DMA channel IRQ */ in xgene_dma_tasklet_cb()
986 enable_irq(chan->rx_irq); in xgene_dma_tasklet_cb()
999 disable_irq_nosync(chan->rx_irq); in xgene_dma_chan_ring_isr()
1006 tasklet_schedule(&chan->tasklet); in xgene_dma_chan_ring_isr()
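The receive path above follows the usual hard-IRQ/tasklet split: the interrupt handler masks its line and defers the real descriptor cleanup to a tasklet, which unmasks the line when it is done (see xgene_dma_tasklet_cb() re-enabling the IRQ a few lines earlier). A generic sketch of that pattern, not this driver's code; the tasklet is assumed to have been initialised elsewhere with tasklet_setup(&chan->tasklet, demo_tasklet):

#include <linux/interrupt.h>

struct demo_chan {
	int rx_irq;
	struct tasklet_struct tasklet;
};

static irqreturn_t demo_isr(int irq, void *id)
{
	struct demo_chan *chan = id;

	disable_irq_nosync(chan->rx_irq);	/* mask until the bottom half runs */
	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}

static void demo_tasklet(struct tasklet_struct *t)
{
	struct demo_chan *chan = from_tasklet(chan, t, tasklet);

	/* ... clean up completed descriptors, push pending work ... */

	enable_irq(chan->rx_irq);		/* re-arm the interrupt */
}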
1017 val = ioread32(pdma->csr_dma + XGENE_DMA_INT); in xgene_dma_err_isr()
1020 iowrite32(val, pdma->csr_dma + XGENE_DMA_INT); in xgene_dma_err_isr()
1025 dev_err(pdma->dev, in xgene_dma_err_isr()
1035 iowrite32(ring->num, ring->pdma->csr_ring + XGENE_DMA_RING_STATE); in xgene_dma_wr_ring_state()
1038 iowrite32(ring->state[i], ring->pdma->csr_ring + in xgene_dma_wr_ring_state()
1044 memset(ring->state, 0, sizeof(u32) * XGENE_DMA_RING_NUM_CONFIG); in xgene_dma_clr_ring_state()
1050 void *ring_cfg = ring->state; in xgene_dma_setup_ring()
1051 u64 addr = ring->desc_paddr; in xgene_dma_setup_ring()
1054 ring->slots = ring->size / XGENE_DMA_RING_WQ_DESC_SIZE; in xgene_dma_setup_ring()
1062 if (ring->owner == XGENE_DMA_RING_OWNER_DMA) { in xgene_dma_setup_ring()
1075 XGENE_DMA_RING_SIZE_SET(ring_cfg, ring->cfgsize); in xgene_dma_setup_ring()
1081 iowrite32(XGENE_DMA_RING_ID_SETUP(ring->id), in xgene_dma_setup_ring()
1082 ring->pdma->csr_ring + XGENE_DMA_RING_ID); in xgene_dma_setup_ring()
1085 iowrite32(XGENE_DMA_RING_ID_BUF_SETUP(ring->num), in xgene_dma_setup_ring()
1086 ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF); in xgene_dma_setup_ring()
1088 if (ring->owner != XGENE_DMA_RING_OWNER_CPU) in xgene_dma_setup_ring()
1092 for (i = 0; i < ring->slots; i++) { in xgene_dma_setup_ring()
1095 desc = &ring->desc_hw[i]; in xgene_dma_setup_ring()
1096 desc->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE); in xgene_dma_setup_ring()
1100 val = ioread32(ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE); in xgene_dma_setup_ring()
1101 XGENE_DMA_RING_NE_INT_MODE_SET(val, ring->buf_num); in xgene_dma_setup_ring()
1102 iowrite32(val, ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE); in xgene_dma_setup_ring()
1109 if (ring->owner == XGENE_DMA_RING_OWNER_CPU) { in xgene_dma_clear_ring()
1111 val = ioread32(ring->pdma->csr_ring + in xgene_dma_clear_ring()
1113 XGENE_DMA_RING_NE_INT_MODE_RESET(val, ring->buf_num); in xgene_dma_clear_ring()
1114 iowrite32(val, ring->pdma->csr_ring + in xgene_dma_clear_ring()
1119 ring_id = XGENE_DMA_RING_ID_SETUP(ring->id); in xgene_dma_clear_ring()
1120 iowrite32(ring_id, ring->pdma->csr_ring + XGENE_DMA_RING_ID); in xgene_dma_clear_ring()
1122 iowrite32(0, ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF); in xgene_dma_clear_ring()
1128 ring->cmd_base = ring->pdma->csr_ring_cmd + in xgene_dma_set_ring_cmd()
1129 XGENE_DMA_RING_CMD_BASE_OFFSET((ring->num - in xgene_dma_set_ring_cmd()
1132 ring->cmd = ring->cmd_base + XGENE_DMA_RING_CMD_OFFSET; in xgene_dma_set_ring_cmd()
1158 return -EINVAL; in xgene_dma_get_ring_size()
1169 /* De-allocate DMA ring descriptor */ in xgene_dma_delete_ring_one()
1170 if (ring->desc_vaddr) { in xgene_dma_delete_ring_one()
1171 dma_free_coherent(ring->pdma->dev, ring->size, in xgene_dma_delete_ring_one()
1172 ring->desc_vaddr, ring->desc_paddr); in xgene_dma_delete_ring_one()
1173 ring->desc_vaddr = NULL; in xgene_dma_delete_ring_one()
1179 xgene_dma_delete_ring_one(&chan->rx_ring); in xgene_dma_delete_chan_rings()
1180 xgene_dma_delete_ring_one(&chan->tx_ring); in xgene_dma_delete_chan_rings()
1190 ring->pdma = chan->pdma; in xgene_dma_create_ring_one()
1191 ring->cfgsize = cfgsize; in xgene_dma_create_ring_one()
1192 ring->num = chan->pdma->ring_num++; in xgene_dma_create_ring_one()
1193 ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num); in xgene_dma_create_ring_one()
1198 ring->size = ret; in xgene_dma_create_ring_one()
1201 ring->desc_vaddr = dma_alloc_coherent(chan->dev, ring->size, in xgene_dma_create_ring_one()
1202 &ring->desc_paddr, GFP_KERNEL); in xgene_dma_create_ring_one()
1203 if (!ring->desc_vaddr) { in xgene_dma_create_ring_one()
1205 return -ENOMEM; in xgene_dma_create_ring_one()
1217 struct xgene_dma_ring *rx_ring = &chan->rx_ring; in xgene_dma_create_chan_rings()
1218 struct xgene_dma_ring *tx_ring = &chan->tx_ring; in xgene_dma_create_chan_rings()
1222 rx_ring->owner = XGENE_DMA_RING_OWNER_CPU; in xgene_dma_create_chan_rings()
1223 rx_ring->buf_num = XGENE_DMA_CPU_BUFNUM + chan->id; in xgene_dma_create_chan_rings()
1231 rx_ring->id, rx_ring->num, rx_ring->desc_vaddr); in xgene_dma_create_chan_rings()
1233 /* Create DMA Tx ring descriptor */ in xgene_dma_create_chan_rings()
1234 tx_ring->owner = XGENE_DMA_RING_OWNER_DMA; in xgene_dma_create_chan_rings()
1235 tx_ring->buf_num = XGENE_DMA_BUFNUM + chan->id; in xgene_dma_create_chan_rings()
1244 tx_ring->dst_ring_num = XGENE_DMA_RING_DST_ID(rx_ring->num); in xgene_dma_create_chan_rings()
1247 "Tx ring id 0x%X num %d desc 0x%p\n", in xgene_dma_create_chan_rings()
1248 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr); in xgene_dma_create_chan_rings()
1251 chan->max_outstanding = tx_ring->slots; in xgene_dma_create_chan_rings()
1261 ret = xgene_dma_create_chan_rings(&pdma->chan[i]); in xgene_dma_init_rings()
1264 xgene_dma_delete_chan_rings(&pdma->chan[j]); in xgene_dma_init_rings()
1277 val = ioread32(pdma->csr_dma + XGENE_DMA_GCR); in xgene_dma_enable()
1280 iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR); in xgene_dma_enable()
1287 val = ioread32(pdma->csr_dma + XGENE_DMA_GCR); in xgene_dma_disable()
1289 iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR); in xgene_dma_disable()
1299 pdma->csr_dma + XGENE_DMA_RING_INT0_MASK); in xgene_dma_mask_interrupts()
1301 pdma->csr_dma + XGENE_DMA_RING_INT1_MASK); in xgene_dma_mask_interrupts()
1303 pdma->csr_dma + XGENE_DMA_RING_INT2_MASK); in xgene_dma_mask_interrupts()
1305 pdma->csr_dma + XGENE_DMA_RING_INT3_MASK); in xgene_dma_mask_interrupts()
1307 pdma->csr_dma + XGENE_DMA_RING_INT4_MASK); in xgene_dma_mask_interrupts()
1310 iowrite32(XGENE_DMA_INT_ALL_MASK, pdma->csr_dma + XGENE_DMA_INT_MASK); in xgene_dma_mask_interrupts()
1320 pdma->csr_dma + XGENE_DMA_RING_INT0_MASK); in xgene_dma_unmask_interrupts()
1322 pdma->csr_dma + XGENE_DMA_RING_INT1_MASK); in xgene_dma_unmask_interrupts()
1324 pdma->csr_dma + XGENE_DMA_RING_INT2_MASK); in xgene_dma_unmask_interrupts()
1326 pdma->csr_dma + XGENE_DMA_RING_INT3_MASK); in xgene_dma_unmask_interrupts()
1328 pdma->csr_dma + XGENE_DMA_RING_INT4_MASK); in xgene_dma_unmask_interrupts()
1332 pdma->csr_dma + XGENE_DMA_INT_MASK); in xgene_dma_unmask_interrupts()
1341 pdma->csr_dma + XGENE_DMA_CFG_RING_WQ_ASSOC); in xgene_dma_init_hw()
1346 pdma->csr_dma + XGENE_DMA_RAID6_CONT); in xgene_dma_init_hw()
1348 dev_info(pdma->dev, "PQ is disabled in HW\n"); in xgene_dma_init_hw()
1354 val = ioread32(pdma->csr_dma + XGENE_DMA_IPBRR); in xgene_dma_init_hw()
1357 dev_info(pdma->dev, in xgene_dma_init_hw()
1358 "X-Gene DMA v%d.%02d.%02d driver registered %d channels", in xgene_dma_init_hw()
1365 if (ioread32(pdma->csr_ring + XGENE_DMA_RING_CLKEN) && in xgene_dma_init_ring_mngr()
1366 (!ioread32(pdma->csr_ring + XGENE_DMA_RING_SRST))) in xgene_dma_init_ring_mngr()
1369 iowrite32(0x3, pdma->csr_ring + XGENE_DMA_RING_CLKEN); in xgene_dma_init_ring_mngr()
1370 iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_SRST); in xgene_dma_init_ring_mngr()
1373 iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN); in xgene_dma_init_ring_mngr()
1376 ioread32(pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN); in xgene_dma_init_ring_mngr()
1381 if (ioread32(pdma->csr_ring + XGENE_DMA_RING_BLK_MEM_RDY) in xgene_dma_init_ring_mngr()
1383 dev_err(pdma->dev, in xgene_dma_init_ring_mngr()
1385 return -ENODEV; in xgene_dma_init_ring_mngr()
1390 pdma->csr_ring + XGENE_DMA_RING_THRESLD0_SET1); in xgene_dma_init_ring_mngr()
1392 pdma->csr_ring + XGENE_DMA_RING_THRESLD1_SET1); in xgene_dma_init_ring_mngr()
1394 pdma->csr_ring + XGENE_DMA_RING_HYSTERESIS); in xgene_dma_init_ring_mngr()
1398 pdma->csr_ring + XGENE_DMA_RING_CONFIG); in xgene_dma_init_ring_mngr()
1412 iowrite32(0x0, pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN); in xgene_dma_init_mem()
1415 ioread32(pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN); in xgene_dma_init_mem()
1420 if (ioread32(pdma->csr_dma + XGENE_DMA_BLK_MEM_RDY) in xgene_dma_init_mem()
1422 dev_err(pdma->dev, in xgene_dma_init_mem()
1424 return -ENODEV; in xgene_dma_init_mem()
1436 ret = devm_request_irq(pdma->dev, pdma->err_irq, xgene_dma_err_isr, in xgene_dma_request_irqs()
1439 dev_err(pdma->dev, in xgene_dma_request_irqs()
1440 "Failed to register error IRQ %d\n", pdma->err_irq); in xgene_dma_request_irqs()
1446 chan = &pdma->chan[i]; in xgene_dma_request_irqs()
1447 irq_set_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY); in xgene_dma_request_irqs()
1448 ret = devm_request_irq(chan->dev, chan->rx_irq, in xgene_dma_request_irqs()
1450 0, chan->name, chan); in xgene_dma_request_irqs()
1453 chan->rx_irq); in xgene_dma_request_irqs()
1454 devm_free_irq(pdma->dev, pdma->err_irq, pdma); in xgene_dma_request_irqs()
1457 chan = &pdma->chan[i]; in xgene_dma_request_irqs()
1458 irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY); in xgene_dma_request_irqs()
1459 devm_free_irq(chan->dev, chan->rx_irq, chan); in xgene_dma_request_irqs()
1475 devm_free_irq(pdma->dev, pdma->err_irq, pdma); in xgene_dma_free_irqs()
1478 chan = &pdma->chan[i]; in xgene_dma_free_irqs()
1479 irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY); in xgene_dma_free_irqs()
1480 devm_free_irq(chan->dev, chan->rx_irq, chan); in xgene_dma_free_irqs()
1488 dma_cap_zero(dma_dev->cap_mask); in xgene_dma_set_caps()
1492 /* Basically here, the X-Gene SoC DMA engine channel 0 supports XOR in xgene_dma_set_caps()
1501 if ((chan->id == XGENE_DMA_PQ_CHANNEL) && in xgene_dma_set_caps()
1502 is_pq_enabled(chan->pdma)) { in xgene_dma_set_caps()
1503 dma_cap_set(DMA_PQ, dma_dev->cap_mask); in xgene_dma_set_caps()
1504 dma_cap_set(DMA_XOR, dma_dev->cap_mask); in xgene_dma_set_caps()
1505 } else if ((chan->id == XGENE_DMA_XOR_CHANNEL) && in xgene_dma_set_caps()
1506 !is_pq_enabled(chan->pdma)) { in xgene_dma_set_caps()
1507 dma_cap_set(DMA_XOR, dma_dev->cap_mask); in xgene_dma_set_caps()
1511 dma_dev->dev = chan->dev; in xgene_dma_set_caps()
1512 dma_dev->device_alloc_chan_resources = xgene_dma_alloc_chan_resources; in xgene_dma_set_caps()
1513 dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources; in xgene_dma_set_caps()
1514 dma_dev->device_issue_pending = xgene_dma_issue_pending; in xgene_dma_set_caps()
1515 dma_dev->device_tx_status = xgene_dma_tx_status; in xgene_dma_set_caps()
1517 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { in xgene_dma_set_caps()
1518 dma_dev->device_prep_dma_xor = xgene_dma_prep_xor; in xgene_dma_set_caps()
1519 dma_dev->max_xor = XGENE_DMA_MAX_XOR_SRC; in xgene_dma_set_caps()
1520 dma_dev->xor_align = DMAENGINE_ALIGN_64_BYTES; in xgene_dma_set_caps()
1523 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { in xgene_dma_set_caps()
1524 dma_dev->device_prep_dma_pq = xgene_dma_prep_pq; in xgene_dma_set_caps()
1525 dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC; in xgene_dma_set_caps()
1526 dma_dev->pq_align = DMAENGINE_ALIGN_64_BYTES; in xgene_dma_set_caps()
1532 struct xgene_dma_chan *chan = &pdma->chan[id]; in xgene_dma_async_register()
1533 struct dma_device *dma_dev = &pdma->dma_dev[id]; in xgene_dma_async_register()
1536 chan->dma_chan.device = dma_dev; in xgene_dma_async_register()
1538 spin_lock_init(&chan->lock); in xgene_dma_async_register()
1539 INIT_LIST_HEAD(&chan->ld_pending); in xgene_dma_async_register()
1540 INIT_LIST_HEAD(&chan->ld_running); in xgene_dma_async_register()
1541 INIT_LIST_HEAD(&chan->ld_completed); in xgene_dma_async_register()
1542 tasklet_setup(&chan->tasklet, xgene_dma_tasklet_cb); in xgene_dma_async_register()
1544 chan->pending = 0; in xgene_dma_async_register()
1545 chan->desc_pool = NULL; in xgene_dma_async_register()
1546 dma_cookie_init(&chan->dma_chan); in xgene_dma_async_register()
1552 INIT_LIST_HEAD(&dma_dev->channels); in xgene_dma_async_register()
1553 list_add_tail(&chan->dma_chan.device_node, &dma_dev->channels); in xgene_dma_async_register()
1559 tasklet_kill(&chan->tasklet); in xgene_dma_async_register()
1565 dev_info(pdma->dev, in xgene_dma_async_register()
1566 "%s: CAPABILITY ( %s%s)\n", dma_chan_name(&chan->dma_chan), in xgene_dma_async_register()
1567 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "", in xgene_dma_async_register()
1568 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : ""); in xgene_dma_async_register()
1581 dma_async_device_unregister(&pdma->dma_dev[j]); in xgene_dma_init_async()
1582 tasklet_kill(&pdma->chan[j].tasklet); in xgene_dma_init_async()
1597 dma_async_device_unregister(&pdma->dma_dev[i]); in xgene_dma_async_unregister()
1605 pdma->ring_num = XGENE_DMA_RING_NUM; in xgene_dma_init_channels()
1608 chan = &pdma->chan[i]; in xgene_dma_init_channels()
1609 chan->dev = pdma->dev; in xgene_dma_init_channels()
1610 chan->pdma = pdma; in xgene_dma_init_channels()
1611 chan->id = i; in xgene_dma_init_channels()
1612 snprintf(chan->name, sizeof(chan->name), "dmachan%d", chan->id); in xgene_dma_init_channels()
1625 dev_err(&pdev->dev, "Failed to get csr region\n"); in xgene_dma_get_resources()
1626 return -ENXIO; in xgene_dma_get_resources()
1629 pdma->csr_dma = devm_ioremap(&pdev->dev, res->start, in xgene_dma_get_resources()
1631 if (!pdma->csr_dma) { in xgene_dma_get_resources()
1632 dev_err(&pdev->dev, "Failed to ioremap csr region"); in xgene_dma_get_resources()
1633 return -ENOMEM; in xgene_dma_get_resources()
1639 dev_err(&pdev->dev, "Failed to get ring csr region\n"); in xgene_dma_get_resources()
1640 return -ENXIO; in xgene_dma_get_resources()
1643 pdma->csr_ring = devm_ioremap(&pdev->dev, res->start, in xgene_dma_get_resources()
1645 if (!pdma->csr_ring) { in xgene_dma_get_resources()
1646 dev_err(&pdev->dev, "Failed to ioremap ring csr region"); in xgene_dma_get_resources()
1647 return -ENOMEM; in xgene_dma_get_resources()
1653 dev_err(&pdev->dev, "Failed to get ring cmd csr region\n"); in xgene_dma_get_resources()
1654 return -ENXIO; in xgene_dma_get_resources()
1657 pdma->csr_ring_cmd = devm_ioremap(&pdev->dev, res->start, in xgene_dma_get_resources()
1659 if (!pdma->csr_ring_cmd) { in xgene_dma_get_resources()
1660 dev_err(&pdev->dev, "Failed to ioremap ring cmd csr region"); in xgene_dma_get_resources()
1661 return -ENOMEM; in xgene_dma_get_resources()
1664 pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET; in xgene_dma_get_resources()
1669 dev_err(&pdev->dev, "Failed to get efuse csr region\n"); in xgene_dma_get_resources()
1670 return -ENXIO; in xgene_dma_get_resources()
1673 pdma->csr_efuse = devm_ioremap(&pdev->dev, res->start, in xgene_dma_get_resources()
1675 if (!pdma->csr_efuse) { in xgene_dma_get_resources()
1676 dev_err(&pdev->dev, "Failed to ioremap efuse csr region"); in xgene_dma_get_resources()
1677 return -ENOMEM; in xgene_dma_get_resources()
1683 return -ENXIO; in xgene_dma_get_resources()
1685 pdma->err_irq = irq; in xgene_dma_get_resources()
1691 return -ENXIO; in xgene_dma_get_resources()
1693 pdma->chan[i - 1].rx_irq = irq; in xgene_dma_get_resources()
1704 pdma = devm_kzalloc(&pdev->dev, sizeof(*pdma), GFP_KERNEL); in xgene_dma_probe()
1706 return -ENOMEM; in xgene_dma_probe()
1708 pdma->dev = &pdev->dev; in xgene_dma_probe()
1715 pdma->clk = devm_clk_get(&pdev->dev, NULL); in xgene_dma_probe()
1716 if (IS_ERR(pdma->clk) && !ACPI_COMPANION(&pdev->dev)) { in xgene_dma_probe()
1717 dev_err(&pdev->dev, "Failed to get clk\n"); in xgene_dma_probe()
1718 return PTR_ERR(pdma->clk); in xgene_dma_probe()
1722 if (!IS_ERR(pdma->clk)) { in xgene_dma_probe()
1723 ret = clk_prepare_enable(pdma->clk); in xgene_dma_probe()
1725 dev_err(&pdev->dev, "Failed to enable clk %d\n", ret); in xgene_dma_probe()
1735 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(42)); in xgene_dma_probe()
1737 dev_err(&pdev->dev, "No usable DMA configuration\n"); in xgene_dma_probe()
1768 xgene_dma_delete_chan_rings(&pdma->chan[i]); in xgene_dma_probe()
1772 if (!IS_ERR(pdma->clk)) in xgene_dma_probe()
1773 clk_disable_unprepare(pdma->clk); in xgene_dma_probe()
1792 chan = &pdma->chan[i]; in xgene_dma_remove()
1793 tasklet_kill(&chan->tasklet); in xgene_dma_remove()
1797 if (!IS_ERR(pdma->clk)) in xgene_dma_remove()
1798 clk_disable_unprepare(pdma->clk); in xgene_dma_remove()
1812 {.compatible = "apm,xgene-storm-dma",},
1821 .name = "X-Gene-DMA",
1829 MODULE_DESCRIPTION("APM X-Gene SoC DMA driver");