Lines matching "num" and "rings" in drivers/mailbox/bcm-flexrm-mailbox.c (the Broadcom FlexRM mailbox driver), grouped by function. Only the matching lines are shown; surrounding code is elided.

/*
 * Broadcom FlexRM Mailbox Driver: the FlexRM ring manager provides a set
 * of rings which can be used to submit work; this driver creates a mailbox
 * controller where each mailbox channel represents a separate FlexRM ring.
 */

#include <linux/dma-mapping.h>
#include <linux/mailbox/brcm-message.h>

#define RING_BD_ALIGN_CHECK(addr) \
	(!((addr) & ((0x1 << RING_BD_ALIGN_ORDER) - 1)))

/* Per-Ring register offsets */

	int num;			/* member of struct flexrm_ring */
	struct flexrm_ring *rings;	/* member of struct flexrm_mbox */

/* flexrm_cmpl_desc_to_error() */
		return -EIO;
		return -ETIMEDOUT;

/* flexrm_enqueue_desc() */
	 * by one or more non-HEADER descriptors (SRC, SRCT, MSRC, DST,
	 * DSTT, MDST, IMM, and IMMT). The number of non-HEADER descriptors
	 * means we can only have 31 non-HEADER descriptors following one
	 * In general use, the number of non-HEADER descriptors can easily go
	if ((nhpos % HEADER_BDCOUNT_MAX == 0) && (nhcnt - nhpos)) {
		nhavail = (nhcnt - nhpos);
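
/*
 * Hedged sketch, not driver code: the comment above says a HEADER
 * descriptor can be followed by at most 31 non-HEADER descriptors
 * (HEADER_BDCOUNT_MAX), so a request with nhcnt non-HEADER descriptors
 * emits a fresh HEADER whenever nhpos is a multiple of 31. The constant
 * matches the comment; the loop and main() below are illustrative only.
 */
#include <stdio.h>

#define HEADER_BDCOUNT_MAX 31

int main(void)
{
	unsigned int nhcnt = 70;	/* non-HEADER descriptors to enqueue */
	unsigned int nhpos, headers = 0;

	for (nhpos = 0; nhpos < nhcnt; nhpos++) {
		if ((nhpos % HEADER_BDCOUNT_MAX == 0) && (nhcnt - nhpos)) {
			headers++;	/* emit a HEADER descriptor here */
			printf("HEADER at nhpos=%u, %u non-HEADER left\n",
			       nhpos, nhcnt - nhpos);
		}
	}
	/* 70 non-HEADER descriptors need 3 HEADERs (at nhpos 0, 31, 62) */
	printf("total HEADER descriptors: %u\n", headers);
	return 0;
}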

/* flexrm_spu_sanity_check() */
	if (!msg->spu.src || !msg->spu.dst)
	for (sg = msg->spu.src; sg; sg = sg_next(sg)) {
		if (sg->length & 0xf) {
			if (sg->length > SRC_LENGTH_MASK)
			if (sg->length > (MSRC_LENGTH_MASK * 16))
	for (sg = msg->spu.dst; sg; sg = sg_next(sg)) {
		if (sg->length & 0xf) {
			if (sg->length > DST_LENGTH_MASK)
			if (sg->length > (MDST_LENGTH_MASK * 16))

/* flexrm_spu_estimate_nonheader_desc_count() */
	struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;
		dst_target = src_sg->length;
			if (dst_sg->length < dst_target)
				dst_target -= dst_sg->length;

/* flexrm_spu_dma_map() */
	rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
	rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
		dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),

/* flexrm_spu_dma_unmap() */
	dma_unmap_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
	dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),

/* flexrm_spu_write_descs() */
	struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;
			dst_target -= sg_dma_len(dst_sg);
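
/*
 * Hedged sketch of the src/dst pairing used by the SPU helpers above: for
 * each source fragment, a "dst_target" byte count must be covered by one
 * or more destination fragments before moving to the next source. Plain
 * arrays stand in for struct scatterlist; all names, lengths, and main()
 * are illustrative assumptions, not driver code.
 */
#include <stdio.h>

static unsigned int count_spu_descs(const unsigned int *src_len, int src_n,
				    const unsigned int *dst_len, int dst_n)
{
	unsigned int cnt = 0, dst_target;
	int s, d = 0;

	for (s = 0; s < src_n; s++) {
		cnt++;				/* one SRC/MSRC descriptor */
		dst_target = src_len[s];	/* bytes dst must cover */
		while (dst_target && d < dst_n) {
			cnt++;			/* one DST/MDST descriptor */
			if (dst_len[d] < dst_target)
				dst_target -= dst_len[d];
			else
				dst_target = 0;	/* covers the remainder */
			d++;
		}
	}
	return cnt;
}

int main(void)
{
	unsigned int src[] = { 64, 32 }, dst[] = { 16, 48, 32 };

	/* 2 src + 3 dst fragments -> 5 non-HEADER descriptors */
	printf("non-HEADER descriptors: %u\n", count_spu_descs(src, 2, dst, 3));
	return 0;
}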

/* flexrm_sba_sanity_check() */
	if (!msg->sba.cmds || !msg->sba.cmds_count)
	for (i = 0; i < msg->sba.cmds_count; i++) {
		if (((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
		     (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C)) &&
		    (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT))
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) &&
		    (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C) &&
		    (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP) &&
		    (msg->sba.cmds[i].resp_len > DSTT_LENGTH_MASK))
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT) &&
		    (msg->sba.cmds[i].data_len > DSTT_LENGTH_MASK))

/* flexrm_sba_estimate_nonheader_desc_count() */
	for (i = 0; i < msg->sba.cmds_count; i++) {
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
		    (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C))
		if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP)
		if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT)
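
/*
 * Hedged sketch of the per-command descriptor budget implied by the two
 * SBA helpers above: each command needs one IMM/IMMT descriptor, TYPE_B or
 * TYPE_C adds an SRCT for command data, HAS_RESP adds a DSTT for the
 * response, and HAS_OUTPUT adds one more for the output. The flag values
 * below are placeholders, not the real BRCM_SBA_CMD_* definitions.
 */
#define CMD_TYPE_B	(1U << 0)	/* placeholder flag bits */
#define CMD_TYPE_C	(1U << 1)
#define CMD_HAS_RESP	(1U << 2)
#define CMD_HAS_OUTPUT	(1U << 3)

static unsigned int sba_cmd_desc_count(unsigned int flags)
{
	unsigned int cnt = 1;		/* IMM or IMMT for the command word */

	if (flags & (CMD_TYPE_B | CMD_TYPE_C))
		cnt++;			/* SRCT for command data */
	if (flags & CMD_HAS_RESP)
		cnt++;			/* DSTT for the response buffer */
	if (flags & CMD_HAS_OUTPUT)
		cnt++;			/* DSTT (or DST) for the output */
	return cnt;
}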

/* flexrm_sba_write_descs() */
	for (i = 0; i < msg->sba.cmds_count; i++) {
		c = &msg->sba.cmds[i];

		if ((c->flags & BRCM_SBA_CMD_HAS_RESP) &&
		    (c->flags & BRCM_SBA_CMD_HAS_OUTPUT)) {
			d = flexrm_dst_desc(c->resp, c->resp_len);
		} else if (c->flags & BRCM_SBA_CMD_HAS_RESP) {
			d = flexrm_dstt_desc(c->resp, c->resp_len);

		if (c->flags & BRCM_SBA_CMD_HAS_OUTPUT) {
			d = flexrm_dstt_desc(c->data, c->data_len);

		if (c->flags & BRCM_SBA_CMD_TYPE_B) {
			d = flexrm_imm_desc(c->cmd);
			d = flexrm_immt_desc(c->cmd);

		if ((c->flags & BRCM_SBA_CMD_TYPE_B) ||
		    (c->flags & BRCM_SBA_CMD_TYPE_C)) {
			d = flexrm_srct_desc(c->data, c->data_len);

/* flexrm_sanity_check() */
	switch (msg->type) {

/* flexrm_estimate_nonheader_desc_count() */
	switch (msg->type) {

/* flexrm_dma_map() */
		return -EINVAL;
	switch (msg->type) {

/* flexrm_dma_unmap() */
	switch (msg->type) {

/* flexrm_write_descs() */
		return ERR_PTR(-ENOTSUPP);
		return ERR_PTR(-ERANGE);
	switch (msg->type) {
		return ERR_PTR(-ENOTSUPP);

/* flexrm_write_config_in_seqfile() */
	seq_printf(file, "%-5s %-9s %-18s %-10s %-18s %-10s\n",
	for (i = 0; i < mbox->num_rings; i++) {
		ring = &mbox->rings[i];
		if (readl(ring->regs + RING_CONTROL) &
		seq_printf(file,
			   "%-5d %-9s 0x%016llx 0x%08x 0x%016llx 0x%08x\n",
			   ring->num, state,
			   (unsigned long long)ring->bd_dma_base,
			   (unsigned long long)ring->cmpl_dma_base,

/* flexrm_write_stats_in_seqfile() */
	seq_printf(file, "%-5s %-10s %-10s %-10s %-11s %-11s\n",
	for (i = 0; i < mbox->num_rings; i++) {
		ring = &mbox->rings[i];
		bd_read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
		val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
		bd_read_offset += (u32)(BD_START_ADDR_DECODE(val) -
					ring->bd_dma_base);
		seq_printf(file, "%-5d 0x%08x 0x%08x 0x%08x %-11d %-11d\n",
			   ring->num,
			   (u32)ring->bd_write_offset,
			   (u32)ring->cmpl_read_offset,
			   (u32)atomic_read(&ring->msg_send_count),
			   (u32)atomic_read(&ring->msg_cmpl_count));
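
/*
 * Hedged sketch of the offset rebase above: the hardware read pointer is
 * reported in descriptor units (the startup code below multiplies such
 * pointers by RING_DESC_SIZE), while the driver keeps byte offsets from
 * bd_base. The pointer is scaled to bytes, then the decoded ring start
 * address minus bd_dma_base rebases it onto the driver's offsets. The
 * decode macro and scaling step below are placeholder reconstructions.
 */
#include <stdint.h>

#define DECODE_START_ADDR(val)	((uint64_t)(val) << 4)	/* placeholder */

static uint32_t bd_read_offset_bytes(uint32_t read_ptr, uint32_t start_reg,
				     uint64_t bd_dma_base, uint32_t desc_size)
{
	uint32_t off = read_ptr * desc_size;	/* descriptor units -> bytes */

	/* rebase from bus-address space onto the ring's byte offsets */
	off += (uint32_t)(DECODE_START_ADDR(start_reg) - bd_dma_base);
	return off;
}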

/* flexrm_new_request() */
		return -EIO;
	msg->error = 0;

	spin_lock_irqsave(&ring->lock, flags);
	reqid = bitmap_find_free_region(ring->requests_bmap,
	spin_unlock_irqrestore(&ring->lock, flags);
		return -ENOSPC;
	ring->requests[reqid] = msg;

	ret = flexrm_dma_map(ring->mbox->dev, msg);
		ring->requests[reqid] = NULL;
		spin_lock_irqsave(&ring->lock, flags);
		bitmap_release_region(ring->requests_bmap, reqid, 0);
		spin_unlock_irqrestore(&ring->lock, flags);

	read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
	val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
	read_offset += (u32)(BD_START_ADDR_DECODE(val) - ring->bd_dma_base);

	 * Number of required descriptors = number of non-header descriptors +
	write_offset = ring->bd_write_offset;
		if (!flexrm_is_next_table_desc(ring->bd_base + write_offset))
			count--;

		ret = -ENOSPC;

			ring->bd_base + ring->bd_write_offset,
			RING_BD_TOGGLE_VALID(ring->bd_write_offset),
			ring->bd_base, ring->bd_base + RING_BD_SIZE);

	ring->bd_write_offset = (unsigned long)(next - ring->bd_base);

	atomic_inc_return(&ring->msg_send_count);

	msg->error = ret;

	flexrm_dma_unmap(ring->mbox->dev, msg);
	ring->requests[reqid] = NULL;
	spin_lock_irqsave(&ring->lock, flags);
	bitmap_release_region(ring->requests_bmap, reqid, 0);
	spin_unlock_irqrestore(&ring->lock, flags);
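
/*
 * Hedged sketch of the space check in flexrm_new_request(): starting at
 * the software write offset, walk toward the hardware read offset,
 * consuming one slot per required descriptor; slots holding NEXT_TABLE
 * link descriptors do not count (the "if (!flexrm_is_next_table_desc(...))
 * count--;" above). If the walk reaches the read offset with descriptors
 * still required, the request fails with -ENOSPC. All names, sizes, and
 * the link-descriptor stub below are placeholders.
 */
#include <stdbool.h>

#define DESC_SIZE	8U
#define RING_BYTES	(1024U * DESC_SIZE)

static bool is_next_table_desc(unsigned int off)
{
	/* placeholder: the driver inspects the descriptor at bd_base + off;
	 * here, pretend only the last slot holds a NEXT_TABLE link.
	 */
	return off == RING_BYTES - DESC_SIZE;
}

/* returns true when 'count' descriptors fit between write and read offset */
static bool ring_has_space(unsigned int write_off, unsigned int read_off,
			   unsigned int count)
{
	while (count) {
		if (!is_next_table_desc(write_off))
			count--;		/* usable descriptor slot */
		write_off += DESC_SIZE;
		if (write_off == RING_BYTES)
			write_off = 0;		/* wrap around */
		if (write_off == read_off)
			break;			/* caught up with hardware */
	}
	return count == 0;
}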

/* flexrm_process_completions() */
	struct mbox_chan *chan = &ring->mbox->controller.chans[ring->num];

	spin_lock_irqsave(&ring->lock, flags);
	cmpl_write_offset = readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
	cmpl_read_offset = ring->cmpl_read_offset;
	ring->cmpl_read_offset = cmpl_write_offset;
	spin_unlock_irqrestore(&ring->lock, flags);

		desc = *((u64 *)(ring->cmpl_base + cmpl_read_offset));
			dev_warn(ring->mbox->dev,
				 ring->num, (unsigned long)desc, err);
		msg = ring->requests[reqid];
			dev_warn(ring->mbox->dev,
				 ring->num, (unsigned long)desc);

		ring->requests[reqid] = NULL;
		spin_lock_irqsave(&ring->lock, flags);
		bitmap_release_region(ring->requests_bmap, reqid, 0);
		spin_unlock_irqrestore(&ring->lock, flags);

		flexrm_dma_unmap(ring->mbox->dev, msg);

		/* Give the message back to the mailbox client */
		msg->error = err;
		atomic_inc_return(&ring->msg_cmpl_count);
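
/*
 * Hedged sketch of the completion walk above: the driver snapshots the
 * hardware completion write offset under the ring lock, then consumes
 * descriptors from its cached read offset up to that snapshot, wrapping at
 * the end of the completion ring; each descriptor carries a request id
 * indexing the requests[] table filled by flexrm_new_request(). Sizes and
 * the reqid field below are placeholders.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CMPL_DESC_SIZE	8U
#define CMPL_RING_BYTES	(1024U * CMPL_DESC_SIZE)

static int process_completions(const uint8_t *cmpl_base,
			       unsigned int read_off, unsigned int write_off)
{
	uint64_t desc;
	int count = 0;

	while (read_off != write_off) {
		memcpy(&desc, cmpl_base + read_off, sizeof(desc));
		printf("completed reqid %u\n",
		       (unsigned int)(desc & 0x3ff));	/* placeholder field */
		read_off += CMPL_DESC_SIZE;
		if (read_off == CMPL_RING_BYTES)
			read_off = 0;			/* wrap around */
		count++;
	}
	return count;		/* completed requests seen this pass */
}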

/* flexrm_debugfs_conf_show() */
	struct flexrm_mbox *mbox = dev_get_drvdata(file->private);

/* flexrm_debugfs_stats_show() */
	struct flexrm_mbox *mbox = dev_get_drvdata(file->private);

/* flexrm_send_data() */
	struct flexrm_ring *ring = chan->con_priv;

	if (msg->type == BRCM_MESSAGE_BATCH) {
		for (i = msg->batch.msgs_queued;
		     i < msg->batch.msgs_count; i++) {
					  &msg->batch.msgs[i]);
				msg->error = rc;
			msg->batch.msgs_queued++;
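
/*
 * Hedged sketch of the batch-resume pattern above: a BRCM_MESSAGE_BATCH
 * carries msgs_count sub-messages and a msgs_queued cursor. If the ring
 * fills mid-batch, the cursor records progress so a later send_data() call
 * retries from the first unqueued sub-message instead of re-queuing
 * everything. enqueue_one() and the types below are placeholders.
 */
#include <stdbool.h>

struct batch {
	int msgs_queued;	/* resume cursor, persists across retries */
	int msgs_count;
};

static bool enqueue_one(int i)	/* placeholder for flexrm_new_request() */
{
	return i < 3;		/* pretend the ring fills after 3 messages */
}

/* returns true once the whole batch has been queued */
static bool send_batch(struct batch *b)
{
	for (; b->msgs_queued < b->msgs_count; b->msgs_queued++) {
		if (!enqueue_one(b->msgs_queued))
			return false;	/* ring full: retry later from here */
	}
	return true;
}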

/* flexrm_peek_data() */
	int cnt = flexrm_process_completions(chan->con_priv);

/* flexrm_startup() */
	struct flexrm_ring *ring = chan->con_priv;

	ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool,
				       GFP_KERNEL, &ring->bd_dma_base);
	if (!ring->bd_base) {
		dev_err(ring->mbox->dev,
			ring->num);
		ret = -ENOMEM;

		next_addr += ring->bd_dma_base;
		flexrm_write_desc(ring->bd_base + off, d);

	ring->cmpl_base = dma_pool_zalloc(ring->mbox->cmpl_pool,
					  GFP_KERNEL, &ring->cmpl_dma_base);
	if (!ring->cmpl_base) {
		dev_err(ring->mbox->dev,
			ring->num);
		ret = -ENOMEM;

	if (ring->irq == UINT_MAX) {
		dev_err(ring->mbox->dev,
			"ring%d IRQ not available\n", ring->num);
		ret = -ENODEV;
	ret = request_threaded_irq(ring->irq,
				   0, dev_name(ring->mbox->dev), ring);
		dev_err(ring->mbox->dev,
			"failed to request ring%d IRQ\n", ring->num);
	ring->irq_requested = true;

	ring->irq_aff_hint = CPU_MASK_NONE;
	val = ring->mbox->num_rings;
	cpumask_set_cpu((ring->num / val) % num_online_cpus(),
			&ring->irq_aff_hint);
	ret = irq_set_affinity_hint(ring->irq, &ring->irq_aff_hint);
		dev_err(ring->mbox->dev,
			ring->num);

	writel_relaxed(0x0, ring->regs + RING_CONTROL);

	val = BD_START_ADDR_VALUE(ring->bd_dma_base);
	writel_relaxed(val, ring->regs + RING_BD_START_ADDR);

	ring->bd_write_offset =
		readl_relaxed(ring->regs + RING_BD_WRITE_PTR);
	ring->bd_write_offset *= RING_DESC_SIZE;

	val = CMPL_START_ADDR_VALUE(ring->cmpl_dma_base);
	writel_relaxed(val, ring->regs + RING_CMPL_START_ADDR);

	ring->cmpl_read_offset =
		readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
	ring->cmpl_read_offset *= RING_DESC_SIZE;

	readl_relaxed(ring->regs + RING_NUM_REQ_RECV_LS);
	readl_relaxed(ring->regs + RING_NUM_REQ_RECV_MS);
	readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_LS);
	readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_MS);
	readl_relaxed(ring->regs + RING_NUM_REQ_OUTSTAND);

	val |= (ring->msi_timer_val << MSI_TIMER_VAL_SHIFT);
	val |= (ring->msi_count_threshold & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;
	writel_relaxed(val, ring->regs + RING_MSI_CONTROL);

	writel_relaxed(val, ring->regs + RING_CONTROL);

	atomic_set(&ring->msg_send_count, 0);
	atomic_set(&ring->msg_cmpl_count, 0);

	free_irq(ring->irq, ring);
	ring->irq_requested = false;
	dma_pool_free(ring->mbox->cmpl_pool,
		      ring->cmpl_base, ring->cmpl_dma_base);
	ring->cmpl_base = NULL;
	dma_pool_free(ring->mbox->bd_pool,
		      ring->bd_base, ring->bd_dma_base);
	ring->bd_base = NULL;
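
/*
 * Hedged sketch of the IRQ affinity spread in flexrm_startup(): with more
 * rings than online CPUs, consecutive rings are grouped so their IRQs are
 * spread evenly, and (ring_num / group) % cpus picks the target CPU. The
 * "group" computation is an assumption reconstructed around the visible
 * cpumask_set_cpu() line above.
 */
#include <stdio.h>

static unsigned int ring_cpu(unsigned int ring_num, unsigned int num_rings,
			     unsigned int online_cpus)
{
	unsigned int group = (online_cpus < num_rings)
				? num_rings / online_cpus : 1;

	return (ring_num / group) % online_cpus;
}

int main(void)
{
	unsigned int r;

	/* 8 rings on 4 CPUs: rings 0-1 -> cpu0, 2-3 -> cpu1, ... */
	for (r = 0; r < 8; r++)
		printf("ring%u -> cpu%u\n", r, ring_cpu(r, 8, 4));
	return 0;
}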

/* flexrm_shutdown() */
	struct flexrm_ring *ring = chan->con_priv;

	writel_relaxed(0x0, ring->regs + RING_CONTROL);

		       ring->regs + RING_CONTROL);
		if (readl_relaxed(ring->regs + RING_FLUSH_DONE) &
	} while (--timeout);
		dev_err(ring->mbox->dev,
			"setting ring%d flush state timed out\n", ring->num);

	writel_relaxed(0x0, ring->regs + RING_CONTROL);
		if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
	} while (--timeout);
		dev_err(ring->mbox->dev,
			"clearing ring%d flush state timed out\n", ring->num);

	/* Abort all in-flight requests */
		msg = ring->requests[reqid];

		ring->requests[reqid] = NULL;

		flexrm_dma_unmap(ring->mbox->dev, msg);

		/* Give the message back to the mailbox client */
		msg->error = -EIO;

	bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);

	if (ring->irq_requested) {
		irq_set_affinity_hint(ring->irq, NULL);
		free_irq(ring->irq, ring);
		ring->irq_requested = false;
	}

	/* Free up the completion descriptor ring */
	if (ring->cmpl_base) {
		dma_pool_free(ring->mbox->cmpl_pool,
			      ring->cmpl_base, ring->cmpl_dma_base);
		ring->cmpl_base = NULL;
	}

	/* Free up the BD descriptor ring */
	if (ring->bd_base) {
		dma_pool_free(ring->mbox->bd_pool,
			      ring->bd_base, ring->bd_dma_base);
		ring->bd_base = NULL;
	}
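
/*
 * Hedged sketch of the two flush phases above: write the flush request to
 * RING_CONTROL and poll RING_FLUSH_DONE until it asserts, then clear
 * control and poll until it deasserts, each phase bounded by a countdown
 * timeout. Register access is simulated below; the bit value and latency
 * model are placeholders, not driver behavior.
 */
#include <stdbool.h>
#include <stdio.h>

#define FLUSH_DONE_BIT	0x1U		/* placeholder status bit */

static unsigned int hw_polls_needed = 3;	/* simulated hw latency */

static unsigned int read_flush_done(bool flush_requested)
{
	if (hw_polls_needed) {
		hw_polls_needed--;	/* hardware still busy */
		return flush_requested ? 0 : FLUSH_DONE_BIT;
	}
	return flush_requested ? FLUSH_DONE_BIT : 0;
}

static bool poll_flush(bool flush_requested, unsigned int timeout)
{
	do {
		unsigned int val = read_flush_done(flush_requested);

		if (!!(val & FLUSH_DONE_BIT) == flush_requested)
			return true;	/* desired flush state reached */
		/* the driver waits ~1ms per iteration here */
	} while (--timeout);
	return false;			/* "... flush state timed out" */
}

int main(void)
{
	if (!poll_flush(true, 100))
		printf("setting flush state timed out\n");
	hw_polls_needed = 3;
	if (!poll_flush(false, 100))
		printf("clearing flush state timed out\n");
	return 0;
}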

/* flexrm_mbox_of_xlate() */
	if (pa->args_count < 3)
		return ERR_PTR(-EINVAL);

	if (pa->args[0] >= cntlr->num_chans)
		return ERR_PTR(-ENOENT);

	if (pa->args[1] > MSI_COUNT_MASK)
		return ERR_PTR(-EINVAL);

	if (pa->args[2] > MSI_TIMER_VAL_MASK)
		return ERR_PTR(-EINVAL);

	chan = &cntlr->chans[pa->args[0]];
	ring = chan->con_priv;
	ring->msi_count_threshold = pa->args[1];
	ring->msi_timer_val = pa->args[2];
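
/*
 * Hedged note on the xlate above: a client's mboxes property carries three
 * cells per channel, <ring-index msi-count-threshold msi-timer-value>,
 * e.g. mboxes = <&flexrm 0 0x1 0xffff>; (illustrative; consult the
 * brcm,iproc-flexrm-mbox binding for the authoritative format). A minimal
 * sketch of the same validation, with placeholder limits:
 */
#include <errno.h>

#define MSI_COUNT_MASK		0x3ffU	/* placeholder limits */
#define MSI_TIMER_VAL_MASK	0xffffU

static int xlate_check(unsigned int num_chans, const unsigned int args[3])
{
	if (args[0] >= num_chans)
		return -ENOENT;		/* no such ring/channel */
	if (args[1] > MSI_COUNT_MASK)
		return -EINVAL;		/* MSI count threshold too large */
	if (args[2] > MSI_TIMER_VAL_MASK)
		return -EINVAL;		/* MSI timer value too large */
	return 0;
}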

/* flexrm_mbox_msi_write() */
	struct flexrm_ring *ring = &mbox->rings[desc->platform.msi_index];

	/* Configure per-Ring MSI registers */
	writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS);
	writel_relaxed(msg->address_hi, ring->regs + RING_MSI_ADDR_MS);
	writel_relaxed(msg->data, ring->regs + RING_MSI_DATA_VALUE);

/* flexrm_mbox_probe() */
	struct device *dev = &pdev->dev;

		ret = -ENOMEM;
	mbox->dev = dev;

		ret = -ENODEV;

	/* Map registers of all rings */
	mbox->regs = devm_ioremap_resource(&pdev->dev, iomem);
	if (IS_ERR(mbox->regs)) {
		ret = PTR_ERR(mbox->regs);
		dev_err(&pdev->dev, "Failed to remap mailbox regs: %d\n", ret);
	regs_end = mbox->regs + resource_size(iomem);

	/* Scan and count available rings */
	mbox->num_rings = 0;
	for (regs = mbox->regs; regs < regs_end; regs += RING_REGS_SIZE) {
		mbox->num_rings++;
	if (!mbox->num_rings) {
		ret = -ENODEV;

	ring = devm_kcalloc(dev, mbox->num_rings, sizeof(*ring), GFP_KERNEL);
		ret = -ENOMEM;
	mbox->rings = ring;

	regs = mbox->regs;
	for (index = 0; index < mbox->num_rings; index++) {
		ring = &mbox->rings[index];
		ring->num = index;
		ring->mbox = mbox;
			ret = -ENODEV;
		ring->regs = regs;
		ring->irq = UINT_MAX;
		ring->irq_requested = false;
		ring->msi_timer_val = MSI_TIMER_VAL_MASK;
		ring->msi_count_threshold = 0x1;
		memset(ring->requests, 0, sizeof(ring->requests));
		ring->bd_base = NULL;
		ring->bd_dma_base = 0;
		ring->cmpl_base = NULL;
		ring->cmpl_dma_base = 0;
		atomic_set(&ring->msg_send_count, 0);
		atomic_set(&ring->msg_cmpl_count, 0);
		spin_lock_init(&ring->lock);
		bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);
		ring->cmpl_read_offset = 0;

	/* FlexRM is capable of 40-bit physical addresses only */

	mbox->bd_pool = dma_pool_create("bd", dev, RING_BD_SIZE,
	if (!mbox->bd_pool) {
		ret = -ENOMEM;

	mbox->cmpl_pool = dma_pool_create("cmpl", dev, RING_CMPL_SIZE,
	if (!mbox->cmpl_pool) {
		ret = -ENOMEM;

	ret = platform_msi_domain_alloc_irqs(dev, mbox->num_rings,

		ring = &mbox->rings[desc->platform.msi_index];
		ring->irq = desc->irq;

	mbox->root = debugfs_create_dir(dev_name(mbox->dev), NULL);

	debugfs_create_devm_seqfile(mbox->dev, "config", mbox->root,
	debugfs_create_devm_seqfile(mbox->dev, "stats", mbox->root,

	mbox->controller.txdone_irq = false;
	mbox->controller.txdone_poll = false;
	mbox->controller.ops = &flexrm_mbox_chan_ops;
	mbox->controller.dev = dev;
	mbox->controller.num_chans = mbox->num_rings;
	mbox->controller.of_xlate = flexrm_mbox_of_xlate;
	mbox->controller.chans = devm_kcalloc(dev, mbox->num_rings,
				sizeof(*mbox->controller.chans), GFP_KERNEL);
	if (!mbox->controller.chans) {
		ret = -ENOMEM;
	for (index = 0; index < mbox->num_rings; index++)
		mbox->controller.chans[index].con_priv = &mbox->rings[index];

	ret = devm_mbox_controller_register(dev, &mbox->controller);
		 mbox->controller.num_chans);

	debugfs_remove_recursive(mbox->root);
	dma_pool_destroy(mbox->cmpl_pool);
	dma_pool_destroy(mbox->bd_pool);
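
/*
 * Hedged note on the "40-bit physical addresses" comment inside probe: the
 * actual call is not among the matched lines, but a driver with this
 * constraint would typically cap its DMA mask before creating the BD and
 * completion pools, along these lines (an assumption, not a quoted line
 * from this file):
 */
#include <linux/dma-mapping.h>

static int flexrm_set_dma_mask(struct device *dev)
{
	/* limit streaming and coherent DMA to 40-bit bus addresses */
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
}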

/* flexrm_mbox_remove() */
	struct device *dev = &pdev->dev;

	debugfs_remove_recursive(mbox->root);
	dma_pool_destroy(mbox->cmpl_pool);
	dma_pool_destroy(mbox->bd_pool);

/* OF match table */
	{ .compatible = "brcm,iproc-flexrm-mbox", },

/* platform driver */
	.name = "brcm-flexrm-mbox",