Lines Matching +full:dfx +full:- +full:bus

1 // SPDX-License-Identifier: GPL-2.0
8 #include <linux/dma-mapping.h>
59 #define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1)
69 #define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1)
71 #define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc)->w11) >> 6) & 0x1)
77 #define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
80 #define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
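The phase macros above (lines 69, 77, 80) implement the classic phase-tag scheme for rings shared with hardware: the device writes a phase bit into every entry it produces, and software keeps a mirror flag that it flips each time its head pointer wraps, so "entry phase equals expected phase" means "new entry". A minimal standalone model of the idea (types, depth, and names here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEPTH 4

struct entry { uint16_t w7; };               /* bit 0 plays the role of QM_CQE_PHASE() */

int main(void)
{
    struct entry ring[DEPTH] = {{1}, {1}, {0}, {0}};  /* device has produced two entries */
    uint16_t head = 0;
    bool phase = true;                       /* matches qp_status.cqc_phase = true at init */

    while ((ring[head].w7 & 0x1) == phase) { /* same test as QM_CQE_PHASE(cqe) == cqc_phase */
        printf("consume entry %u\n", head);
        if (head == DEPTH - 1) {
            phase = !phase;                  /* wrapped: next lap the device writes the other tag */
            head = 0;
        } else {
            head++;
        }
    }
    return 0;
}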
195 ((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
209 ((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
212 (qc)->head = 0; \
213 (qc)->tail = 0; \
214 (qc)->base_l = cpu_to_le32(lower_32_bits(base)); \
215 (qc)->base_h = cpu_to_le32(upper_32_bits(base)); \
216 (qc)->dw3 = 0; \
217 (qc)->w8 = 0; \
218 (qc)->rsvd0 = 0; \
219 (qc)->pasid = cpu_to_le16(pasid); \
220 (qc)->w11 = 0; \
221 (qc)->rsvd1 = 0; \
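The dw3 fragments at lines 195 and 209 (apparently the V2 variants invoked at lines 1764 and 1791 below) pack the ring geometry into one word: depth minus one in the low bits, plus the raw CQE size or ilog2() of the SQE size in a high field. INIT_QC_COMMON (lines 212-221) then zeroes the context and splits the 64-bit DMA base across base_l/base_h. A small standalone check of that arithmetic (the shift value and depth below are assumptions for illustration, not taken from qm.c):

#include <stdint.h>
#include <stdio.h>

#define QM_Q_DEPTH            1024           /* assumed depth for the example */
#define QM_SQ_SQE_SIZE_SHIFT  28             /* illustrative value only */

static uint32_t ilog2_u32(uint32_t v)        /* userspace stand-in for the kernel's ilog2() */
{
    uint32_t r = 0;

    while (v >>= 1)
        r++;
    return r;
}

int main(void)
{
    uint64_t base = 0x123456789000ULL;       /* a DMA address, as handed to INIT_QC_COMMON */
    uint32_t sqe_size = 128;

    /* QM_MK_SQC_DW3_V2 shape: depth-1 in the low bits, log2(sqe size) in the high field */
    uint32_t dw3 = (QM_Q_DEPTH - 1) | (ilog2_u32(sqe_size) << QM_SQ_SQE_SIZE_SHIFT);

    /* lower_32_bits()/upper_32_bits() as used for base_l/base_h */
    uint32_t base_l = (uint32_t)base;
    uint32_t base_h = (uint32_t)(base >> 32);

    printf("dw3=0x%08x base_l=0x%08x base_h=0x%08x\n", dw3, base_l, base_h);
    return 0;
}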
392 enum qm_state curr = atomic_read(&qm->status.flags); in qm_avail_state()
412 dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n", in qm_avail_state()
416 dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n", in qm_avail_state()
425 enum qm_state qm_curr = atomic_read(&qm->status.flags); in qm_qp_avail_state()
430 qp_curr = atomic_read(&qp->qp_status.flags); in qm_qp_avail_state()
458 dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n", in qm_qp_avail_state()
462 dev_warn(&qm->pdev->dev, in qm_qp_avail_state()
469 /* returns 0 if the mailbox is ready, -ETIMEDOUT on hardware timeout */
474 return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE, in qm_wait_mb_ready()
482 void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE; in qm_mb_write()
507 dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n", in qm_mb()
518 mutex_lock(&qm->mailbox_lock); in qm_mb()
521 ret = -EBUSY; in qm_mb()
522 dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n"); in qm_mb()
529 ret = -EBUSY; in qm_mb()
530 dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n"); in qm_mb()
535 mutex_unlock(&qm->mailbox_lock); in qm_mb()
538 atomic64_inc(&qm->debug.dfx.mb_err_cnt); in qm_mb()
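The qm_mb() fragments above (lines 507-538) show the whole mailbox discipline: everything happens under mailbox_lock, the driver polls QM_MB_CMD_SEND_BASE for readiness both before ("QM mailbox is busy to start!") and after ("QM mailbox operation timeout!") the 128-bit command write, and any failure bumps the dfx.mb_err_cnt counter. A condensed userspace model of that handshake (the busy flag and helper names are stand-ins, not the hardware interface):

#include <stdint.h>
#include <stdio.h>

static int mb_busy;                          /* models the bit polled by qm_wait_mb_ready() */

static int wait_mb_ready(void)
{
    return mb_busy ? -1 : 0;                 /* real code: readl_relaxed_poll_timeout() */
}

static void mb_write(uint64_t cmd)           /* stands in for the 128-bit qm_mb_write() */
{
    mb_busy = 1;                             /* command accepted, hardware now busy */
    printf("mailbox cmd 0x%llx sent\n", (unsigned long long)cmd);
    mb_busy = 0;                             /* hardware completion */
}

static int mb_send(uint64_t cmd)
{
    /* mutex_lock(&qm->mailbox_lock) would bracket this in the driver */
    if (wait_mb_ready())
        return -1;                           /* the -EBUSY "busy to start" path */
    mb_write(cmd);
    if (wait_mb_ready())
        return -1;                           /* the "operation timeout" path */
    return 0;                                /* on failure the driver also bumps mb_err_cnt */
}

int main(void)
{
    return mb_send(0x1234) ? 1 : 0;
}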
550 writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1); in qm_db_v1()
569 writeq(doorbell, qm->io_base + dbase); in qm_db_v2()
574 dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n", in qm_db()
577 qm->ops->qm_db(qm, qn, cmd, index, priority); in qm_db()
584 writel(0x1, qm->io_base + QM_MEM_START_INIT); in qm_dev_mem_reset()
585 return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val, in qm_dev_mem_reset()
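qm_db_v1()/qm_db_v2() (lines 550, 569) both end in a single 64-bit writeq of a composed doorbell word, and qm_db() (line 577) just dispatches to the hardware-version hook with queue number, command, index, and priority. A standalone sketch of packing such a word follows; the field order mirrors the qm_db() parameters, but the exact shift values are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

/* Illustrative field offsets; the real QM_DB_*_SHIFT values live in qm.c. */
#define DB_CMD_SHIFT       16
#define DB_INDEX_SHIFT     32
#define DB_PRIORITY_SHIFT  48

static uint64_t make_doorbell(uint16_t qn, uint8_t cmd, uint16_t index, uint16_t priority)
{
    return (uint64_t)qn |
           ((uint64_t)cmd << DB_CMD_SHIFT) |
           ((uint64_t)index << DB_INDEX_SHIFT) |
           ((uint64_t)priority << DB_PRIORITY_SHIFT);
}

int main(void)
{
    /* e.g. "ring the CQ doorbell for queue 3 at head 17", as qm_poll_qp() does */
    printf("doorbell = 0x%016llx\n",
           (unsigned long long)make_doorbell(3, 1, 17, 0));
    return 0;
}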
596 if (qm->fun_type == QM_HW_PF) in qm_get_irq_num_v2()
604 u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; in qm_to_hisi_qp()
606 return &qm->qp_array[cqn]; in qm_to_hisi_qp()
611 if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) { in qm_cq_head_update()
612 qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase; in qm_cq_head_update()
613 qp->qp_status.cq_head = 0; in qm_cq_head_update()
615 qp->qp_status.cq_head++; in qm_cq_head_update()
621 if (qp->event_cb) { in qm_poll_qp()
622 qp->event_cb(qp); in qm_poll_qp()
626 if (qp->req_cb) { in qm_poll_qp()
627 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; in qm_poll_qp()
629 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { in qm_poll_qp()
631 qp->req_cb(qp, qp->sqe + qm->sqe_size * in qm_poll_qp()
632 le16_to_cpu(cqe->sq_head)); in qm_poll_qp()
634 cqe = qp->cqe + qp->qp_status.cq_head; in qm_poll_qp()
635 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, in qm_poll_qp()
636 qp->qp_status.cq_head, 0); in qm_poll_qp()
637 atomic_dec(&qp->qp_status.used); in qm_poll_qp()
641 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, in qm_poll_qp()
642 qp->qp_status.cq_head, 1); in qm_poll_qp()
649 struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; in qm_work_process()
653 while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { in qm_work_process()
658 if (qm->status.eq_head == QM_EQ_DEPTH - 1) { in qm_work_process()
659 qm->status.eqc_phase = !qm->status.eqc_phase; in qm_work_process()
660 eqe = qm->eqe; in qm_work_process()
661 qm->status.eq_head = 0; in qm_work_process()
664 qm->status.eq_head++; in qm_work_process()
667 if (eqe_num == QM_EQ_DEPTH / 2 - 1) { in qm_work_process()
669 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_work_process()
673 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_work_process()
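qm_work_process() (lines 649-673) walks the event queue with the same phase test as the CQ path, but it also rings the EQ doorbell once every QM_EQ_DEPTH/2 - 1 events so the hardware sees progress during long bursts, then rings once more after the loop. The batching is easy to model standalone (depth and event count here are illustrative):

#include <stdio.h>

#define EQ_DEPTH 8

int main(void)
{
    int pending = 13;                        /* events the "hardware" has queued up */
    int head = 0, eqe_num = 0;

    while (pending--) {                      /* stands in for the phase-bit while loop */
        head = (head + 1) % EQ_DEPTH;
        if (++eqe_num == EQ_DEPTH / 2 - 1) {
            eqe_num = 0;
            printf("mid-burst EQ doorbell at head %d\n", head);
        }
    }
    printf("final EQ doorbell at head %d\n", head);
    return 0;
}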
681 if (qm->wq) in do_qm_irq()
682 queue_work(qm->wq, &qm->work); in do_qm_irq()
684 schedule_work(&qm->work); in do_qm_irq()
693 if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE)) in qm_irq()
696 atomic64_inc(&qm->debug.dfx.err_irq_cnt); in qm_irq()
697 dev_err(&qm->pdev->dev, "invalid int source\n"); in qm_irq()
698 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_irq()
706 struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head; in qm_aeq_irq()
709 atomic64_inc(&qm->debug.dfx.aeq_irq_cnt); in qm_aeq_irq()
710 if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE)) in qm_aeq_irq()
713 while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) { in qm_aeq_irq()
714 type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT; in qm_aeq_irq()
716 dev_err(&qm->pdev->dev, "%s overflow\n", in qm_aeq_irq()
719 dev_err(&qm->pdev->dev, "unknown error type %d\n", in qm_aeq_irq()
722 if (qm->status.aeq_head == QM_Q_DEPTH - 1) { in qm_aeq_irq()
723 qm->status.aeqc_phase = !qm->status.aeqc_phase; in qm_aeq_irq()
724 aeqe = qm->aeqe; in qm_aeq_irq()
725 qm->status.aeq_head = 0; in qm_aeq_irq()
728 qm->status.aeq_head++; in qm_aeq_irq()
731 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); in qm_aeq_irq()
739 struct pci_dev *pdev = qm->pdev; in qm_irq_unregister()
743 if (qm->ver == QM_HW_V1) in qm_irq_unregister()
748 if (qm->fun_type == QM_HW_PF) in qm_irq_unregister()
755 struct hisi_qp_status *qp_status = &qp->qp_status; in qm_init_qp_status()
757 qp_status->sq_tail = 0; in qm_init_qp_status()
758 qp_status->cq_head = 0; in qm_init_qp_status()
759 qp_status->cqc_phase = true; in qm_init_qp_status()
760 atomic_set(&qp_status->used, 0); in qm_init_qp_status()
771 if (qm->ver == QM_HW_V1) { in qm_vft_data_cfg()
780 (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT; in qm_vft_data_cfg()
784 if (qm->ver == QM_HW_V1) { in qm_vft_data_cfg()
796 writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L); in qm_vft_data_cfg()
797 writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H); in qm_vft_data_cfg()
806 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_set_vft_common()
811 writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR); in qm_set_vft_common()
812 writel(type, qm->io_base + QM_VFT_CFG_TYPE); in qm_set_vft_common()
813 writel(fun_num, qm->io_base + QM_VFT_CFG); in qm_set_vft_common()
817 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); in qm_set_vft_common()
818 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); in qm_set_vft_common()
820 return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_set_vft_common()
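qm_set_vft_common() (lines 806-820) brackets every VFT write with the same readiness poll: wait for QM_VFT_CFG_RDY, program the operation (write op, table type, function number, 64-bit data split across DATA_L/DATA_H via qm_vft_data_cfg() at lines 796-797), pulse OP_ENABLE, then poll ready again for completion. A userspace model of that sequence (the regs[] array and the ready polarity are simplifications, not the MMIO map):

#include <stdint.h>
#include <stdio.h>

enum { RDY, OP_WR, TYPE, FUN, DATA_L, DATA_H, OP_EN, NREGS };

static uint32_t regs[NREGS];                 /* stands in for qm->io_base + QM_VFT_CFG_* */

static int poll_ready(void)
{
    return regs[RDY] == 0 ? 0 : -1;          /* real code: readl_relaxed_poll_timeout() */
}

static int set_vft(uint32_t type, uint32_t fun_num, uint64_t data)
{
    if (poll_ready())
        return -1;                           /* hardware still chewing on a previous op */
    regs[OP_WR] = 0x0;                       /* select the write operation */
    regs[TYPE] = type;                       /* SQC or CQC table */
    regs[FUN] = fun_num;                     /* which PF/VF the entry belongs to */
    regs[DATA_L] = (uint32_t)data;
    regs[DATA_H] = (uint32_t)(data >> 32);
    regs[OP_EN] = 0x1;                       /* kick the operation off */
    return poll_ready();                     /* wait for it to land */
}

int main(void)
{
    printf("set_vft -> %d\n", set_vft(0, 0, 0xabcdULL));
    return 0;
}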
848 sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | in qm_get_vft_v2()
849 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); in qm_get_vft_v2()
859 struct qm_debug *debug = file->debug; in file_to_qm()
868 return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT; in current_q_read()
876 if (val >= qm->debug.curr_qm_qp_num) in current_q_write()
877 return -EINVAL; in current_q_write()
880 (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK); in current_q_write()
881 writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); in current_q_write()
884 (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK); in current_q_write()
885 writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); in current_q_write()
894 return readl(qm->io_base + QM_DFX_CNT_CLR_CE); in clear_enable_read()
903 return -EINVAL; in clear_enable_write()
905 writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE); in clear_enable_write()
913 struct debugfs_file *file = filp->private_data; in qm_debug_read()
914 enum qm_debug_file index = file->index; in qm_debug_read()
919 mutex_lock(&file->lock); in qm_debug_read()
928 mutex_unlock(&file->lock); in qm_debug_read()
929 return -EINVAL; in qm_debug_read()
931 mutex_unlock(&file->lock); in qm_debug_read()
939 struct debugfs_file *file = filp->private_data; in qm_debug_write()
940 enum qm_debug_file index = file->index; in qm_debug_write()
949 return -ENOSPC; in qm_debug_write()
951 len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf, in qm_debug_write()
958 return -EFAULT; in qm_debug_write()
960 mutex_lock(&file->lock); in qm_debug_write()
973 ret = -EINVAL; in qm_debug_write()
976 mutex_unlock(&file->lock); in qm_debug_write()
981 mutex_unlock(&file->lock); in qm_debug_write()
1034 struct hisi_qm *qm = s->private; in qm_regs_show()
1038 if (qm->fun_type == QM_HW_PF) in qm_regs_show()
1043 while (regs->reg_name) { in qm_regs_show()
1044 val = readl(qm->io_base + regs->reg_offset); in qm_regs_show()
1045 seq_printf(s, "%s= 0x%08x\n", regs->reg_name, val); in qm_regs_show()
1069 struct device *dev = &qm->pdev->dev; in qm_ctx_alloc()
1074 return ERR_PTR(-ENOMEM); in qm_ctx_alloc()
1080 return ERR_PTR(-ENOMEM); in qm_ctx_alloc()
1089 struct device *dev = &qm->pdev->dev; in qm_ctx_free()
1098 struct device *dev = &qm->pdev->dev; in dump_show()
1105 return -ENOMEM; in dump_show()
1113 info_buf[i - 1] = *info_curr; in dump_show()
1115 info_buf[i - 3] = *info_curr; in dump_show()
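dump_show() (lines 1098-1115) does not print context bytes in memory order: the info_buf[i - 1] / info_buf[i - 3] assignments reshuffle bytes within each 32-bit word so the hex dump reads naturally. Only part of the index pattern is visible in the matched lines, so the sketch below shows one plausible reading, reversing each 4-byte group before printing:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t ctx[8] = {0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88};
    uint8_t buf[8];

    for (size_t i = 0; i < sizeof(ctx); i++)
        buf[(i & ~3UL) + (3 - (i & 3))] = ctx[i];  /* reverse bytes within each word */

    for (size_t i = 0; i < sizeof(buf); i++)
        printf("%02x%s", buf[i], (i & 3) == 3 ? "\n" : " ");
    return 0;
}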
1142 struct device *dev = &qm->pdev->dev; in qm_sqc_dump()
1149 return -EINVAL; in qm_sqc_dump()
1152 if (ret || qp_id >= qm->qp_num) { in qm_sqc_dump()
1153 dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1); in qm_sqc_dump()
1154 return -EINVAL; in qm_sqc_dump()
1163 down_read(&qm->qps_lock); in qm_sqc_dump()
1164 if (qm->sqc) { in qm_sqc_dump()
1165 sqc_curr = qm->sqc + qp_id; in qm_sqc_dump()
1172 up_read(&qm->qps_lock); in qm_sqc_dump()
1188 struct device *dev = &qm->pdev->dev; in qm_cqc_dump()
1195 return -EINVAL; in qm_cqc_dump()
1198 if (ret || qp_id >= qm->qp_num) { in qm_cqc_dump()
1199 dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1); in qm_cqc_dump()
1200 return -EINVAL; in qm_cqc_dump()
1209 down_read(&qm->qps_lock); in qm_cqc_dump()
1210 if (qm->cqc) { in qm_cqc_dump()
1211 cqc_curr = qm->cqc + qp_id; in qm_cqc_dump()
1218 up_read(&qm->qps_lock); in qm_cqc_dump()
1235 struct device *dev = &qm->pdev->dev; in qm_eqc_aeqc_dump()
1242 return -EINVAL; in qm_eqc_aeqc_dump()
1265 struct device *dev = &qm->pdev->dev; in q_dump_param_parse()
1266 unsigned int qp_num = qm->qp_num; in q_dump_param_parse()
1273 return -EINVAL; in q_dump_param_parse()
1278 dev_err(dev, "Please input qp num (0-%d)", qp_num - 1); in q_dump_param_parse()
1279 return -EINVAL; in q_dump_param_parse()
1285 return -EINVAL; in q_dump_param_parse()
1290 dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1); in q_dump_param_parse()
1291 return -EINVAL; in q_dump_param_parse()
1296 return -EINVAL; in q_dump_param_parse()
1304 struct device *dev = &qm->pdev->dev; in qm_sq_dump()
1314 sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL); in qm_sq_dump()
1316 return -ENOMEM; in qm_sq_dump()
1318 qp = &qm->qp_array[qp_id]; in qm_sq_dump()
1319 memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH); in qm_sq_dump()
1320 sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size); in qm_sq_dump()
1321 memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK, in qm_sq_dump()
1322 qm->debug.sqe_mask_len); in qm_sq_dump()
1324 ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE"); in qm_sq_dump()
1335 struct device *dev = &qm->pdev->dev; in qm_cq_dump()
1345 qp = &qm->qp_array[qp_id]; in qm_cq_dump()
1346 cqe_curr = qp->cqe + cqe_id; in qm_cq_dump()
1357 struct device *dev = &qm->pdev->dev; in qm_eq_aeq_dump()
1363 return -EINVAL; in qm_eq_aeq_dump()
1367 return -EINVAL; in qm_eq_aeq_dump()
1370 dev_err(dev, "Please input eqe num (0-%d)", QM_EQ_DEPTH - 1); in qm_eq_aeq_dump()
1371 return -EINVAL; in qm_eq_aeq_dump()
1373 dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1); in qm_eq_aeq_dump()
1374 return -EINVAL; in qm_eq_aeq_dump()
1377 down_read(&qm->qps_lock); in qm_eq_aeq_dump()
1379 if (qm->eqe && !strcmp(name, "EQE")) { in qm_eq_aeq_dump()
1380 xeqe = qm->eqe + xeqe_id; in qm_eq_aeq_dump()
1381 } else if (qm->aeqe && !strcmp(name, "AEQE")) { in qm_eq_aeq_dump()
1382 xeqe = qm->aeqe + xeqe_id; in qm_eq_aeq_dump()
1384 ret = -EINVAL; in qm_eq_aeq_dump()
1393 up_read(&qm->qps_lock); in qm_eq_aeq_dump()
1399 struct device *dev = &qm->pdev->dev; in qm_dbg_help()
1403 return -EINVAL; in qm_dbg_help()
1421 struct device *dev = &qm->pdev->dev; in qm_cmd_write_dump()
1427 return -ENOMEM; in qm_cmd_write_dump()
1432 ret = -EINVAL; in qm_cmd_write_dump()
1457 ret = -EINVAL; in qm_cmd_write_dump()
1471 struct hisi_qm *qm = filp->private_data; in qm_cmd_write()
1479 if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) in qm_cmd_write()
1483 return -ENOSPC; in qm_cmd_write()
1487 return -ENOMEM; in qm_cmd_write()
1491 return -EFAULT; in qm_cmd_write()
1499 count = cmd_buf_tmp - cmd_buf + 1; in qm_cmd_write()
1522 struct dentry *qm_d = qm->debug.qm_d; in qm_create_debugfs_file()
1523 struct debugfs_file *file = qm->debug.files + index; in qm_create_debugfs_file()
1528 file->index = index; in qm_create_debugfs_file()
1529 mutex_init(&file->lock); in qm_create_debugfs_file()
1530 file->debug = &qm->debug; in qm_create_debugfs_file()
1537 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v1()
1545 qm->error_mask = ce | nfe | fe; in qm_hw_error_init_v2()
1549 qm->io_base + QM_ABNORMAL_INT_SOURCE); in qm_hw_error_init_v2()
1552 writel(ce, qm->io_base + QM_RAS_CE_ENABLE); in qm_hw_error_init_v2()
1553 writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD); in qm_hw_error_init_v2()
1554 writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE); in qm_hw_error_init_v2()
1555 writel(fe, qm->io_base + QM_RAS_FE_ENABLE); in qm_hw_error_init_v2()
1557 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v2()
1558 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v2()
1563 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v2()
1569 struct device *dev = &qm->pdev->dev; in qm_log_hw_error()
1575 if (!(err->int_msk & error_status)) in qm_log_hw_error()
1579 err->msg, err->int_msk); in qm_log_hw_error()
1581 if (err->int_msk & QM_DB_TIMEOUT) { in qm_log_hw_error()
1582 reg_val = readl(qm->io_base + QM_ABNORMAL_INF01); in qm_log_hw_error()
1588 } else if (err->int_msk & QM_OF_FIFO_OF) { in qm_log_hw_error()
1589 reg_val = readl(qm->io_base + QM_ABNORMAL_INF00); in qm_log_hw_error()
1608 tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); in qm_hw_error_handle_v2()
1609 error_status = qm->error_mask & tmp; in qm_hw_error_handle_v2()
1613 qm->err_status.is_qm_ecc_mbit = true; in qm_hw_error_handle_v2()
1617 writel(error_status, qm->io_base + in qm_hw_error_handle_v2()
1645 struct hisi_qp_status *qp_status = &qp->qp_status; in qm_get_avail_sqe()
1646 u16 sq_tail = qp_status->sq_tail; in qm_get_avail_sqe()
1648 if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH - 1)) in qm_get_avail_sqe()
1651 return qp->sqe + sq_tail * qp->qm->sqe_size; in qm_get_avail_sqe()
1656 struct device *dev = &qm->pdev->dev; in qm_create_qp_nolock()
1661 return ERR_PTR(-EPERM); in qm_create_qp_nolock()
1663 if (qm->qp_in_used == qm->qp_num) { in qm_create_qp_nolock()
1665 qm->qp_num); in qm_create_qp_nolock()
1666 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); in qm_create_qp_nolock()
1667 return ERR_PTR(-EBUSY); in qm_create_qp_nolock()
1670 qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC); in qm_create_qp_nolock()
1673 qm->qp_num); in qm_create_qp_nolock()
1674 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); in qm_create_qp_nolock()
1675 return ERR_PTR(-EBUSY); in qm_create_qp_nolock()
1678 qp = &qm->qp_array[qp_id]; in qm_create_qp_nolock()
1680 memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH); in qm_create_qp_nolock()
1682 qp->event_cb = NULL; in qm_create_qp_nolock()
1683 qp->req_cb = NULL; in qm_create_qp_nolock()
1684 qp->qp_id = qp_id; in qm_create_qp_nolock()
1685 qp->alg_type = alg_type; in qm_create_qp_nolock()
1686 qm->qp_in_used++; in qm_create_qp_nolock()
1687 atomic_set(&qp->qp_status.flags, QP_INIT); in qm_create_qp_nolock()
1693 * hisi_qm_create_qp() - Create a queue pair from qm.
1697 * return created qp, -EBUSY if all qps in qm allocated, -ENOMEM if allocating
1704 down_write(&qm->qps_lock); in hisi_qm_create_qp()
1706 up_write(&qm->qps_lock); in hisi_qm_create_qp()
1713 * hisi_qm_release_qp() - Release a qp back to its qm.
1720 struct hisi_qm *qm = qp->qm; in hisi_qm_release_qp()
1722 down_write(&qm->qps_lock); in hisi_qm_release_qp()
1725 up_write(&qm->qps_lock); in hisi_qm_release_qp()
1729 qm->qp_in_used--; in hisi_qm_release_qp()
1730 idr_remove(&qm->qp_idr, qp->qp_id); in hisi_qm_release_qp()
1732 up_write(&qm->qps_lock); in hisi_qm_release_qp()
1738 struct hisi_qm *qm = qp->qm; in qm_qp_ctx_cfg()
1739 struct device *dev = &qm->pdev->dev; in qm_qp_ctx_cfg()
1740 enum qm_hw_ver ver = qm->ver; in qm_qp_ctx_cfg()
1751 return -ENOMEM; in qm_qp_ctx_cfg()
1756 return -ENOMEM; in qm_qp_ctx_cfg()
1759 INIT_QC_COMMON(sqc, qp->sqe_dma, pasid); in qm_qp_ctx_cfg()
1761 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size)); in qm_qp_ctx_cfg()
1762 sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1); in qm_qp_ctx_cfg()
1764 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size)); in qm_qp_ctx_cfg()
1765 sqc->w8 = 0; /* rand_qc */ in qm_qp_ctx_cfg()
1767 sqc->cq_num = cpu_to_le16(qp_id); in qm_qp_ctx_cfg()
1768 sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type)); in qm_qp_ctx_cfg()
1778 return -ENOMEM; in qm_qp_ctx_cfg()
1783 return -ENOMEM; in qm_qp_ctx_cfg()
1786 INIT_QC_COMMON(cqc, qp->cqe_dma, pasid); in qm_qp_ctx_cfg()
1788 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, 4)); in qm_qp_ctx_cfg()
1789 cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1); in qm_qp_ctx_cfg()
1791 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(4)); in qm_qp_ctx_cfg()
1792 cqc->w8 = 0; in qm_qp_ctx_cfg()
1794 cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT); in qm_qp_ctx_cfg()
1805 struct hisi_qm *qm = qp->qm; in qm_start_qp_nolock()
1806 struct device *dev = &qm->pdev->dev; in qm_start_qp_nolock()
1807 int qp_id = qp->qp_id; in qm_start_qp_nolock()
1812 return -EPERM; in qm_start_qp_nolock()
1818 atomic_set(&qp->qp_status.flags, QP_START); in qm_start_qp_nolock()
1825 * hisi_qm_start_qp() - Start a qp into running.
1830 * successful, Return -EBUSY if failed.
1834 struct hisi_qm *qm = qp->qm; in hisi_qm_start_qp()
1837 down_write(&qm->qps_lock); in hisi_qm_start_qp()
1839 up_write(&qm->qps_lock); in hisi_qm_start_qp()
1852 struct hisi_qm *qm = qp->qm; in qm_drain_qp()
1853 struct device *dev = &qm->pdev->dev; in qm_drain_qp()
1861 * No need to judge if ECC multi-bit error occurs because the in qm_drain_qp()
1864 if (qm->err_status.is_qm_ecc_mbit || qm->err_status.is_dev_ecc_mbit) in qm_drain_qp()
1870 return -ENOMEM; in qm_drain_qp()
1874 ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id); in qm_drain_qp()
1882 qp->qp_id); in qm_drain_qp()
1889 if ((sqc->tail == cqc->tail) && in qm_drain_qp()
1894 dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id); in qm_drain_qp()
1895 ret = -EBUSY; in qm_drain_qp()
1909 struct device *dev = &qp->qm->pdev->dev; in qm_stop_qp_nolock()
1918 if (atomic_read(&qp->qp_status.flags) == QP_STOP) { in qm_stop_qp_nolock()
1919 qp->is_resetting = false; in qm_stop_qp_nolock()
1923 if (!qm_qp_avail_state(qp->qm, qp, QP_STOP)) in qm_stop_qp_nolock()
1924 return -EPERM; in qm_stop_qp_nolock()
1926 atomic_set(&qp->qp_status.flags, QP_STOP); in qm_stop_qp_nolock()
1932 if (qp->qm->wq) in qm_stop_qp_nolock()
1933 flush_workqueue(qp->qm->wq); in qm_stop_qp_nolock()
1935 flush_work(&qp->qm->work); in qm_stop_qp_nolock()
1937 dev_dbg(dev, "stop queue %u!", qp->qp_id); in qm_stop_qp_nolock()
1943 * hisi_qm_stop_qp() - Stop a qp in qm.
1952 down_write(&qp->qm->qps_lock); in hisi_qm_stop_qp()
1954 up_write(&qp->qm->qps_lock); in hisi_qm_stop_qp()
1961 * hisi_qp_send() - Queue up a task in the hardware queue.
1965 * This function will return -EBUSY if qp is currently full, and -EAGAIN
1977 struct hisi_qp_status *qp_status = &qp->qp_status; in hisi_qp_send()
1978 u16 sq_tail = qp_status->sq_tail; in hisi_qp_send()
1982 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP || in hisi_qp_send()
1983 atomic_read(&qp->qm->status.flags) == QM_STOP || in hisi_qp_send()
1984 qp->is_resetting)) { in hisi_qp_send()
1985 dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n"); in hisi_qp_send()
1986 return -EAGAIN; in hisi_qp_send()
1990 return -EBUSY; in hisi_qp_send()
1992 memcpy(sqe, msg, qp->qm->sqe_size); in hisi_qp_send()
1994 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0); in hisi_qp_send()
1995 atomic_inc(&qp->qp_status.used); in hisi_qp_send()
1996 qp_status->sq_tail = sq_tail_next; in hisi_qp_send()
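hisi_qp_send() (lines 1977-1996) is the whole fast path: refuse if the QP or QM is stopping or resetting, take the SQE slot at sq_tail (qm_get_avail_sqe() at line 1648 reports full once "used" hits QM_Q_DEPTH - 1), copy the message in, ring the SQ doorbell with the advanced tail, then bump the used counter and publish the new tail. A compact standalone model of that ring bookkeeping (names mirror the driver; the doorbell is just a printf here):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEPTH     8
#define SQE_SIZE  16

static uint8_t sq[DEPTH][SQE_SIZE];
static uint16_t sq_tail;
static int used;                             /* the driver's atomic qp_status.used */

static int qp_send(const void *msg)
{
    if (used == DEPTH - 1)                   /* same full test as qm_get_avail_sqe() */
        return -1;                           /* -EBUSY in the driver */

    uint16_t next = (sq_tail + 1) % DEPTH;

    memcpy(sq[sq_tail], msg, SQE_SIZE);      /* fill the slot at the current tail */
    printf("SQ doorbell: tail -> %u\n", next);
    used++;                                  /* a CQE consumption decrements this again */
    sq_tail = next;
    return 0;
}

int main(void)
{
    uint8_t msg[SQE_SIZE] = {0};

    while (qp_send(msg) == 0)
        ;                                    /* fills DEPTH - 1 slots, then reports busy */
    printf("queue full after %d sends\n", used);
    return 0;
}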
2006 if (qm->ver == QM_HW_V1) in hisi_qm_cache_wb()
2009 writel(0x1, qm->io_base + QM_CACHE_WB_START); in hisi_qm_cache_wb()
2010 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, in hisi_qm_cache_wb()
2012 dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n"); in hisi_qm_cache_wb()
2017 wake_up_interruptible(&qp->uacce_q->wait); in qm_qp_event_notifier()
2022 return hisi_qm_get_free_qp_num(uacce->priv); in hisi_qm_get_available_instances()
2029 struct hisi_qm *qm = uacce->priv; in hisi_qm_uacce_get_queue()
2037 q->priv = qp; in hisi_qm_uacce_get_queue()
2038 q->uacce = uacce; in hisi_qm_uacce_get_queue()
2039 qp->uacce_q = q; in hisi_qm_uacce_get_queue()
2040 qp->event_cb = qm_qp_event_notifier; in hisi_qm_uacce_get_queue()
2041 qp->pasid = arg; in hisi_qm_uacce_get_queue()
2048 struct hisi_qp *qp = q->priv; in hisi_qm_uacce_put_queue()
2050 hisi_qm_cache_wb(qp->qm); in hisi_qm_uacce_put_queue()
2059 struct hisi_qp *qp = q->priv; in hisi_qm_uacce_mmap()
2060 struct hisi_qm *qm = qp->qm; in hisi_qm_uacce_mmap()
2061 size_t sz = vma->vm_end - vma->vm_start; in hisi_qm_uacce_mmap()
2062 struct pci_dev *pdev = qm->pdev; in hisi_qm_uacce_mmap()
2063 struct device *dev = &pdev->dev; in hisi_qm_uacce_mmap()
2067 switch (qfr->type) { in hisi_qm_uacce_mmap()
2069 if (qm->ver == QM_HW_V1) { in hisi_qm_uacce_mmap()
2071 return -EINVAL; in hisi_qm_uacce_mmap()
2075 return -EINVAL; in hisi_qm_uacce_mmap()
2078 vma->vm_flags |= VM_IO; in hisi_qm_uacce_mmap()
2080 return remap_pfn_range(vma, vma->vm_start, in hisi_qm_uacce_mmap()
2081 qm->phys_base >> PAGE_SHIFT, in hisi_qm_uacce_mmap()
2082 sz, pgprot_noncached(vma->vm_page_prot)); in hisi_qm_uacce_mmap()
2084 if (sz != qp->qdma.size) in hisi_qm_uacce_mmap()
2085 return -EINVAL; in hisi_qm_uacce_mmap()
2091 vm_pgoff = vma->vm_pgoff; in hisi_qm_uacce_mmap()
2092 vma->vm_pgoff = 0; in hisi_qm_uacce_mmap()
2093 ret = dma_mmap_coherent(dev, vma, qp->qdma.va, in hisi_qm_uacce_mmap()
2094 qp->qdma.dma, sz); in hisi_qm_uacce_mmap()
2095 vma->vm_pgoff = vm_pgoff; in hisi_qm_uacce_mmap()
2099 return -EINVAL; in hisi_qm_uacce_mmap()
2105 struct hisi_qp *qp = q->priv; in hisi_qm_uacce_start_queue()
2107 return hisi_qm_start_qp(qp, qp->pasid); in hisi_qm_uacce_start_queue()
2112 hisi_qm_stop_qp(q->priv); in hisi_qm_uacce_stop_queue()
2117 struct hisi_qm *qm = q->uacce->priv; in qm_set_sqctype()
2118 struct hisi_qp *qp = q->priv; in qm_set_sqctype()
2120 down_write(&qm->qps_lock); in qm_set_sqctype()
2121 qp->alg_type = type; in qm_set_sqctype()
2122 up_write(&qm->qps_lock); in qm_set_sqctype()
2130 struct hisi_qp *qp = q->priv; in hisi_qm_uacce_ioctl()
2136 return -EFAULT; in hisi_qm_uacce_ioctl()
2139 return -EINVAL; in hisi_qm_uacce_ioctl()
2142 qp_ctx.id = qp->qp_id; in hisi_qm_uacce_ioctl()
2146 return -EFAULT; in hisi_qm_uacce_ioctl()
2148 return -EINVAL; in hisi_qm_uacce_ioctl()
2166 struct pci_dev *pdev = qm->pdev; in qm_alloc_uacce()
2176 ret = strscpy(interface.name, pdev->driver->name, in qm_alloc_uacce()
2179 return -ENAMETOOLONG; in qm_alloc_uacce()
2181 uacce = uacce_alloc(&pdev->dev, &interface); in qm_alloc_uacce()
2185 if (uacce->flags & UACCE_DEV_SVA) { in qm_alloc_uacce()
2186 qm->use_sva = true; in qm_alloc_uacce()
2190 qm->uacce = NULL; in qm_alloc_uacce()
2191 return -EINVAL; in qm_alloc_uacce()
2194 uacce->is_vf = pdev->is_virtfn; in qm_alloc_uacce()
2195 uacce->priv = qm; in qm_alloc_uacce()
2196 uacce->algs = qm->algs; in qm_alloc_uacce()
2198 if (qm->ver == QM_HW_V1) { in qm_alloc_uacce()
2200 uacce->api_ver = HISI_QM_API_VER_BASE; in qm_alloc_uacce()
2204 uacce->api_ver = HISI_QM_API_VER2_BASE; in qm_alloc_uacce()
2207 dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH + in qm_alloc_uacce()
2210 uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr; in qm_alloc_uacce()
2211 uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr; in qm_alloc_uacce()
2213 qm->uacce = uacce; in qm_alloc_uacce()
2219 * qm_frozen() - Try to freeze the QM to cut off continuous queue requests. If
2227 down_write(&qm->qps_lock); in qm_frozen()
2229 if (qm->is_frozen) { in qm_frozen()
2230 up_write(&qm->qps_lock); in qm_frozen()
2234 if (!qm->qp_in_used) { in qm_frozen()
2235 qm->qp_in_used = qm->qp_num; in qm_frozen()
2236 qm->is_frozen = true; in qm_frozen()
2237 up_write(&qm->qps_lock); in qm_frozen()
2241 up_write(&qm->qps_lock); in qm_frozen()
2243 return -EBUSY; in qm_frozen()
2254 return -EINVAL; in qm_try_frozen_vfs()
2257 mutex_lock(&qm_list->lock); in qm_try_frozen_vfs()
2258 list_for_each_entry(qm, &qm_list->list, list) { in qm_try_frozen_vfs()
2259 dev = qm->pdev; in qm_try_frozen_vfs()
2271 mutex_unlock(&qm_list->lock); in qm_try_frozen_vfs()
2277 * hisi_qm_wait_task_finish() - Wait until the task is finished
2285 ((qm->fun_type == QM_HW_PF) && in hisi_qm_wait_task_finish()
2286 qm_try_frozen_vfs(qm->pdev, qm_list))) { in hisi_qm_wait_task_finish()
2295 * hisi_qm_get_free_qp_num() - Get free number of qp in qm.
2304 down_read(&qm->qps_lock); in hisi_qm_get_free_qp_num()
2305 ret = qm->qp_num - qm->qp_in_used; in hisi_qm_get_free_qp_num()
2306 up_read(&qm->qps_lock); in hisi_qm_get_free_qp_num()
2314 struct device *dev = &qm->pdev->dev; in hisi_qp_memory_uninit()
2318 for (i = num - 1; i >= 0; i--) { in hisi_qp_memory_uninit()
2319 qdma = &qm->qp_array[i].qdma; in hisi_qp_memory_uninit()
2320 dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); in hisi_qp_memory_uninit()
2323 kfree(qm->qp_array); in hisi_qp_memory_uninit()
2328 struct device *dev = &qm->pdev->dev; in hisi_qp_memory_init()
2329 size_t off = qm->sqe_size * QM_Q_DEPTH; in hisi_qp_memory_init()
2332 qp = &qm->qp_array[id]; in hisi_qp_memory_init()
2333 qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma, in hisi_qp_memory_init()
2335 if (!qp->qdma.va) in hisi_qp_memory_init()
2336 return -ENOMEM; in hisi_qp_memory_init()
2338 qp->sqe = qp->qdma.va; in hisi_qp_memory_init()
2339 qp->sqe_dma = qp->qdma.dma; in hisi_qp_memory_init()
2340 qp->cqe = qp->qdma.va + off; in hisi_qp_memory_init()
2341 qp->cqe_dma = qp->qdma.dma + off; in hisi_qp_memory_init()
2342 qp->qdma.size = dma_size; in hisi_qp_memory_init()
2343 qp->qm = qm; in hisi_qp_memory_init()
2344 qp->qp_id = id; in hisi_qp_memory_init()
2351 struct device *dev = &qm->pdev->dev; in hisi_qm_memory_init()
2356 (qm)->type = ((qm)->qdma.va + (off)); \ in hisi_qm_memory_init()
2357 (qm)->type##_dma = (qm)->qdma.dma + (off); \ in hisi_qm_memory_init()
2361 idr_init(&qm->qp_idr); in hisi_qm_memory_init()
2362 qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) + in hisi_qm_memory_init()
2364 QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + in hisi_qm_memory_init()
2365 QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); in hisi_qm_memory_init()
2366 qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma, in hisi_qm_memory_init()
2368 dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size); in hisi_qm_memory_init()
2369 if (!qm->qdma.va) in hisi_qm_memory_init()
2370 return -ENOMEM; in hisi_qm_memory_init()
2374 QM_INIT_BUF(qm, sqc, qm->qp_num); in hisi_qm_memory_init()
2375 QM_INIT_BUF(qm, cqc, qm->qp_num); in hisi_qm_memory_init()
2377 qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL); in hisi_qm_memory_init()
2378 if (!qm->qp_array) { in hisi_qm_memory_init()
2379 ret = -ENOMEM; in hisi_qm_memory_init()
2384 qp_dma_size = qm->sqe_size * QM_Q_DEPTH + in hisi_qm_memory_init()
2387 for (i = 0; i < qm->qp_num; i++) { in hisi_qm_memory_init()
2400 dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); in hisi_qm_memory_init()
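hisi_qm_memory_init() (lines 2351-2379) makes one dma_alloc_coherent() call sized for the EQE ring, AEQE ring, and the SQC/CQC tables, then QM_INIT_BUF() (lines 2356-2357) carves va/dma pairs out of it at running offsets; hisi_qp_memory_init() (lines 2328-2344) does the same per queue pair, with the CQE region starting right after sqe_size * QM_Q_DEPTH. A standalone model of the carve (QMC_ALIGN is modeled as a simple round-up; the real alignment constant and structure sizes are not in the matched lines):

#include <stdio.h>
#include <stdlib.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))
#define QMC_ALIGN(x)    ALIGN_UP(x, 32)      /* placeholder alignment for illustration */

int main(void)
{
    size_t eqe_sz = QMC_ALIGN(16 * 256);     /* sizeof(qm_eqe) * QM_EQ_DEPTH, example numbers */
    size_t aeqe_sz = QMC_ALIGN(16 * 256);
    size_t sqc_sz = QMC_ALIGN(32 * 16);      /* sizeof(qm_sqc) * qp_num */
    size_t cqc_sz = QMC_ALIGN(32 * 16);
    size_t total = eqe_sz + aeqe_sz + sqc_sz + cqc_sz;

    unsigned char *va = calloc(1, total);    /* stands in for dma_alloc_coherent() */
    if (!va)
        return 1;

    /* QM_INIT_BUF(): each region's va (and dma, not modeled) is base plus a running offset */
    size_t off = 0;
    unsigned char *eqe = va + off;  off += eqe_sz;
    unsigned char *aeqe = va + off; off += aeqe_sz;
    unsigned char *sqc = va + off;  off += sqc_sz;
    unsigned char *cqc = va + off;

    printf("eqe@%zu aeqe@%zu sqc@%zu cqc@%zu total=%zu\n",
           (size_t)(eqe - va), (size_t)(aeqe - va),
           (size_t)(sqc - va), (size_t)(cqc - va), total);
    free(va);
    return 0;
}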
2407 struct pci_dev *pdev = qm->pdev; in hisi_qm_pre_init()
2409 if (qm->ver == QM_HW_V1) in hisi_qm_pre_init()
2410 qm->ops = &qm_hw_ops_v1; in hisi_qm_pre_init()
2412 qm->ops = &qm_hw_ops_v2; in hisi_qm_pre_init()
2415 mutex_init(&qm->mailbox_lock); in hisi_qm_pre_init()
2416 init_rwsem(&qm->qps_lock); in hisi_qm_pre_init()
2417 qm->qp_in_used = 0; in hisi_qm_pre_init()
2418 qm->is_frozen = false; in hisi_qm_pre_init()
2422 * hisi_qm_uninit() - Uninitialize qm.
2429 struct pci_dev *pdev = qm->pdev; in hisi_qm_uninit()
2430 struct device *dev = &pdev->dev; in hisi_qm_uninit()
2432 down_write(&qm->qps_lock); in hisi_qm_uninit()
2435 up_write(&qm->qps_lock); in hisi_qm_uninit()
2439 uacce_remove(qm->uacce); in hisi_qm_uninit()
2440 qm->uacce = NULL; in hisi_qm_uninit()
2442 hisi_qp_memory_uninit(qm, qm->qp_num); in hisi_qm_uninit()
2443 idr_destroy(&qm->qp_idr); in hisi_qm_uninit()
2445 if (qm->qdma.va) { in hisi_qm_uninit()
2447 dma_free_coherent(dev, qm->qdma.size, in hisi_qm_uninit()
2448 qm->qdma.va, qm->qdma.dma); in hisi_qm_uninit()
2449 memset(&qm->qdma, 0, sizeof(qm->qdma)); in hisi_qm_uninit()
2454 iounmap(qm->io_base); in hisi_qm_uninit()
2458 up_write(&qm->qps_lock); in hisi_qm_uninit()
2463 * hisi_qm_get_vft() - Get vft from a qm.
2477 return -EINVAL; in hisi_qm_get_vft()
2479 if (!qm->ops->get_vft) { in hisi_qm_get_vft()
2480 dev_err(&qm->pdev->dev, "Don't support vft read!\n"); in hisi_qm_get_vft()
2481 return -EINVAL; in hisi_qm_get_vft()
2484 return qm->ops->get_vft(qm, base, number); in hisi_qm_get_vft()
2492 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
2493 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
2499 u32 max_q_num = qm->ctrl_qp_num; in hisi_qm_set_vft()
2503 return -EINVAL; in hisi_qm_set_vft()
2510 struct hisi_qm_status *status = &qm->status; in qm_init_eq_aeq_status()
2512 status->eq_head = 0; in qm_init_eq_aeq_status()
2513 status->aeq_head = 0; in qm_init_eq_aeq_status()
2514 status->eqc_phase = true; in qm_init_eq_aeq_status()
2515 status->aeqc_phase = true; in qm_init_eq_aeq_status()
2520 struct device *dev = &qm->pdev->dev; in qm_eq_ctx_cfg()
2531 return -ENOMEM; in qm_eq_ctx_cfg()
2536 return -ENOMEM; in qm_eq_ctx_cfg()
2539 eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma)); in qm_eq_ctx_cfg()
2540 eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); in qm_eq_ctx_cfg()
2541 if (qm->ver == QM_HW_V1) in qm_eq_ctx_cfg()
2542 eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE); in qm_eq_ctx_cfg()
2543 eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT)); in qm_eq_ctx_cfg()
2552 return -ENOMEM; in qm_eq_ctx_cfg()
2557 return -ENOMEM; in qm_eq_ctx_cfg()
2560 aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); in qm_eq_ctx_cfg()
2561 aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); in qm_eq_ctx_cfg()
2562 aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT)); in qm_eq_ctx_cfg()
2575 WARN_ON(!qm->qdma.dma); in __hisi_qm_start()
2577 if (qm->fun_type == QM_HW_PF) { in __hisi_qm_start()
2582 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); in __hisi_qm_start()
2591 ret = qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); in __hisi_qm_start()
2595 ret = qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); in __hisi_qm_start()
2599 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK); in __hisi_qm_start()
2600 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK); in __hisi_qm_start()
2606 * hisi_qm_start() - start qm
2613 struct device *dev = &qm->pdev->dev; in hisi_qm_start()
2616 down_write(&qm->qps_lock); in hisi_qm_start()
2619 up_write(&qm->qps_lock); in hisi_qm_start()
2620 return -EPERM; in hisi_qm_start()
2623 dev_dbg(dev, "qm start with %d queue pairs\n", qm->qp_num); in hisi_qm_start()
2625 if (!qm->qp_num) { in hisi_qm_start()
2627 ret = -EINVAL; in hisi_qm_start()
2633 atomic_set(&qm->status.flags, QM_START); in hisi_qm_start()
2636 up_write(&qm->qps_lock); in hisi_qm_start()
2643 struct device *dev = &qm->pdev->dev; in qm_restart()
2651 down_write(&qm->qps_lock); in qm_restart()
2652 for (i = 0; i < qm->qp_num; i++) { in qm_restart()
2653 qp = &qm->qp_array[i]; in qm_restart()
2654 if (atomic_read(&qp->qp_status.flags) == QP_STOP && in qm_restart()
2655 qp->is_resetting == true) { in qm_restart()
2660 up_write(&qm->qps_lock); in qm_restart()
2663 qp->is_resetting = false; in qm_restart()
2666 up_write(&qm->qps_lock); in qm_restart()
2674 struct device *dev = &qm->pdev->dev; in qm_stop_started_qp()
2678 for (i = 0; i < qm->qp_num; i++) { in qm_stop_started_qp()
2679 qp = &qm->qp_array[i]; in qm_stop_started_qp()
2680 if (qp && atomic_read(&qp->qp_status.flags) == QP_START) { in qm_stop_started_qp()
2681 qp->is_resetting = true; in qm_stop_started_qp()
2702 for (i = 0; i < qm->qp_num; i++) { in qm_clear_queues()
2703 qp = &qm->qp_array[i]; in qm_clear_queues()
2704 if (qp->is_resetting) in qm_clear_queues()
2705 memset(qp->qdma.va, 0, qp->qdma.size); in qm_clear_queues()
2708 memset(qm->qdma.va, 0, qm->qdma.size); in qm_clear_queues()
2712 * hisi_qm_stop() - Stop a qm.
2722 struct device *dev = &qm->pdev->dev; in hisi_qm_stop()
2725 down_write(&qm->qps_lock); in hisi_qm_stop()
2727 qm->status.stop_reason = r; in hisi_qm_stop()
2729 ret = -EPERM; in hisi_qm_stop()
2733 if (qm->status.stop_reason == QM_SOFT_RESET || in hisi_qm_stop()
2734 qm->status.stop_reason == QM_FLR) { in hisi_qm_stop()
2743 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); in hisi_qm_stop()
2744 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); in hisi_qm_stop()
2746 if (qm->fun_type == QM_HW_PF) { in hisi_qm_stop()
2750 ret = -EBUSY; in hisi_qm_stop()
2756 atomic_set(&qm->status.flags, QM_STOP); in hisi_qm_stop()
2759 up_write(&qm->qps_lock); in hisi_qm_stop()
2767 struct hisi_qm *qm = filp->private_data; in qm_status_read()
2771 val = atomic_read(&qm->status.flags); in qm_status_read()
2786 return -EINVAL; in qm_debugfs_atomic64_set()
2804 * hisi_qm_debug_init() - Initialize qm related debugfs files.
2811 struct qm_dfx *dfx = &qm->debug.dfx; in hisi_qm_debug_init() local
2816 qm_d = debugfs_create_dir("qm", qm->debug.debug_root); in hisi_qm_debug_init()
2817 qm->debug.qm_d = qm_d; in hisi_qm_debug_init()
2820 if (qm->fun_type == QM_HW_PF) in hisi_qm_debug_init()
2823 ret = -ENOENT; in hisi_qm_debug_init()
2827 debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); in hisi_qm_debug_init()
2829 debugfs_create_file("cmd", 0444, qm->debug.qm_d, qm, &qm_cmd_fops); in hisi_qm_debug_init()
2831 debugfs_create_file("status", 0444, qm->debug.qm_d, qm, in hisi_qm_debug_init()
2834 data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset); in hisi_qm_debug_init()
2851 * hisi_qm_debug_regs_clear() - clear qm debug related registers.
2860 writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); in hisi_qm_debug_regs_clear()
2861 writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); in hisi_qm_debug_regs_clear()
2867 writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE); in hisi_qm_debug_regs_clear()
2871 readl(qm->io_base + regs->reg_offset); in hisi_qm_debug_regs_clear()
2875 writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE); in hisi_qm_debug_regs_clear()
2881 const struct hisi_qm_err_info *err_info = &qm->err_ini->err_info; in qm_hw_error_init()
2883 if (!qm->ops->hw_error_init) { in qm_hw_error_init()
2884 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n"); in qm_hw_error_init()
2888 qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe); in qm_hw_error_init()
2893 if (!qm->ops->hw_error_uninit) { in qm_hw_error_uninit()
2894 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n"); in qm_hw_error_uninit()
2898 qm->ops->hw_error_uninit(qm); in qm_hw_error_uninit()
2903 if (!qm->ops->hw_error_handle) { in qm_hw_error_handle()
2904 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n"); in qm_hw_error_handle()
2908 return qm->ops->hw_error_handle(qm); in qm_hw_error_handle()
2912 * hisi_qm_dev_err_init() - Initialize device error configuration.
2919 if (qm->fun_type == QM_HW_VF) in hisi_qm_dev_err_init()
2924 if (!qm->err_ini->hw_err_enable) { in hisi_qm_dev_err_init()
2925 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n"); in hisi_qm_dev_err_init()
2928 qm->err_ini->hw_err_enable(qm); in hisi_qm_dev_err_init()
2933 * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
2940 if (qm->fun_type == QM_HW_VF) in hisi_qm_dev_err_uninit()
2945 if (!qm->err_ini->hw_err_disable) { in hisi_qm_dev_err_uninit()
2946 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n"); in hisi_qm_dev_err_uninit()
2949 qm->err_ini->hw_err_disable(qm); in hisi_qm_dev_err_uninit()
2954 * hisi_qm_free_qps() - free multiple queue pairs.
2965 for (i = qp_num - 1; i >= 0; i--) in hisi_qm_free_qps()
2975 list_del(&res->list); in free_list()
2989 list_for_each_entry(qm, &qm_list->list, list) { in hisi_qm_sort_devices()
2990 dev = &qm->pdev->dev; in hisi_qm_sort_devices()
3000 return -ENOMEM; in hisi_qm_sort_devices()
3002 res->qm = qm; in hisi_qm_sort_devices()
3003 res->distance = node_distance(dev_node, node); in hisi_qm_sort_devices()
3006 if (res->distance < tmp->distance) { in hisi_qm_sort_devices()
3007 n = &tmp->list; in hisi_qm_sort_devices()
3011 list_add_tail(&res->list, n); in hisi_qm_sort_devices()
3018 * hisi_qm_alloc_qps_node() - Create multiple queue pairs.
3033 int ret = -ENODEV; in hisi_qm_alloc_qps_node()
3038 return -EINVAL; in hisi_qm_alloc_qps_node()
3040 mutex_lock(&qm_list->lock); in hisi_qm_alloc_qps_node()
3042 mutex_unlock(&qm_list->lock); in hisi_qm_alloc_qps_node()
3048 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type); in hisi_qm_alloc_qps_node()
3061 mutex_unlock(&qm_list->lock); in hisi_qm_alloc_qps_node()
3075 u32 q_base = qm->qp_num; in qm_vf_q_assign()
3079 return -EINVAL; in qm_vf_q_assign()
3081 remain_q_num = qm->ctrl_qp_num - qm->qp_num; in qm_vf_q_assign()
3084 if (qm->ctrl_qp_num < qm->qp_num || remain_q_num < num_vfs) in qm_vf_q_assign()
3085 return -EINVAL; in qm_vf_q_assign()
3093 for (j = i; j > 0; j--) in qm_vf_q_assign()
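qm_vf_q_assign() (lines 3075-3093) hands out the queues left over after the PF's share (ctrl_qp_num - qp_num) across num_vfs functions via hisi_qm_set_vft(), starting each VF's range where the previous one ended. The matched lines do not show how a non-divisible remainder is placed, so the model below simply gives it to the last VF, which is an assumption flagged as such:

#include <stdio.h>

int main(void)
{
    int ctrl_qp_num = 64, pf_qp_num = 16, num_vfs = 3;
    int remain = ctrl_qp_num - pf_qp_num;    /* queues not claimed by the PF */
    int per_vf = remain / num_vfs;
    int q_base = pf_qp_num;                  /* VFs start right after the PF's queues */

    for (int vf = 1; vf <= num_vfs; vf++) {
        /* assumed remainder policy: the last VF absorbs what's left */
        int n = (vf == num_vfs) ? remain - per_vf * (num_vfs - 1) : per_vf;

        /* the driver would call hisi_qm_set_vft(qm, vf, q_base, n) here */
        printf("VF%d: queues %d..%d\n", vf, q_base, q_base + n - 1);
        q_base += n;
    }
    return 0;
}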
3108 for (i = 1; i <= qm->vfs_num; i++) { in qm_clear_vft_config()
3113 qm->vfs_num = 0; in qm_clear_vft_config()
3119 * hisi_qm_sriov_enable() - enable virtual functions
3135 pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n", in hisi_qm_sriov_enable()
3147 qm->vfs_num = num_vfs; in hisi_qm_sriov_enable()
3163 * hisi_qm_sriov_disable - disable virtual functions
3175 return -EPERM; in hisi_qm_sriov_disable()
3179 if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) { in hisi_qm_sriov_disable()
3181 return -EBUSY; in hisi_qm_sriov_disable()
3190 * hisi_qm_sriov_configure - configure the number of VFs
3194 * Enable SR-IOV according to num_vfs, 0 means disable.
3209 if (!qm->err_ini->get_dev_hw_err_status) { in qm_dev_err_handle()
3210 dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n"); in qm_dev_err_handle()
3215 err_sts = qm->err_ini->get_dev_hw_err_status(qm); in qm_dev_err_handle()
3217 if (err_sts & qm->err_ini->err_info.ecc_2bits_mask) in qm_dev_err_handle()
3218 qm->err_status.is_dev_ecc_mbit = true; in qm_dev_err_handle()
3220 if (!qm->err_ini->log_dev_hw_err) { in qm_dev_err_handle()
3221 dev_err(&qm->pdev->dev, "Device doesn't support log hw error!\n"); in qm_dev_err_handle()
3225 qm->err_ini->log_dev_hw_err(qm, err_sts); in qm_dev_err_handle()
3248 * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
3261 if (pdev->is_virtfn) in hisi_qm_dev_err_detected()
3278 return readl(qm->io_base + QM_ABNORMAL_INT_STATUS); in qm_get_hw_error_status()
3283 struct pci_dev *pdev = qm->pdev; in qm_check_req_recv()
3287 writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID); in qm_check_req_recv()
3288 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, in qm_check_req_recv()
3292 dev_err(&pdev->dev, "Fails to read QM reg!\n"); in qm_check_req_recv()
3296 writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID); in qm_check_req_recv()
3297 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, in qm_check_req_recv()
3301 dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n"); in qm_check_req_recv()
3308 struct pci_dev *pdev = qm->pdev; in qm_set_pf_mse()
3327 return -ETIMEDOUT; in qm_set_pf_mse()
3332 struct pci_dev *pdev = qm->pdev; in qm_set_vf_mse()
3354 return -ETIMEDOUT; in qm_set_vf_mse()
3359 struct pci_dev *pdev = qm->pdev; in qm_set_msi()
3362 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, in qm_set_msi()
3365 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, in qm_set_msi()
3367 if (qm->err_status.is_qm_ecc_mbit || in qm_set_msi()
3368 qm->err_status.is_dev_ecc_mbit) in qm_set_msi()
3372 if (readl(qm->io_base + QM_PEH_DFX_INFO0)) in qm_set_msi()
3373 return -EFAULT; in qm_set_msi()
3382 struct hisi_qm_list *qm_list = qm->qm_list; in qm_vf_reset_prepare()
3383 struct pci_dev *pdev = qm->pdev; in qm_vf_reset_prepare()
3388 mutex_lock(&qm_list->lock); in qm_vf_reset_prepare()
3389 list_for_each_entry(vf_qm, &qm_list->list, list) { in qm_vf_reset_prepare()
3390 virtfn = vf_qm->pdev; in qm_vf_reset_prepare()
3405 mutex_unlock(&qm_list->lock); in qm_vf_reset_prepare()
3411 struct pci_dev *pdev = qm->pdev; in qm_reset_prepare_ready()
3416 while (test_and_set_bit(QM_DEV_RESET_FLAG, &pf_qm->reset_flag)) { in qm_reset_prepare_ready()
3419 return -EBUSY; in qm_reset_prepare_ready()
3427 struct pci_dev *pdev = qm->pdev; in qm_controller_reset_prepare()
3436 if (qm->vfs_num) { in qm_controller_reset_prepare()
3457 if (!qm->err_status.is_dev_ecc_mbit && in qm_dev_ecc_mbit_handle()
3458 qm->err_status.is_qm_ecc_mbit && in qm_dev_ecc_mbit_handle()
3459 qm->err_ini->close_axi_master_ooo) { in qm_dev_ecc_mbit_handle()
3461 qm->err_ini->close_axi_master_ooo(qm); in qm_dev_ecc_mbit_handle()
3463 } else if (qm->err_status.is_dev_ecc_mbit && in qm_dev_ecc_mbit_handle()
3464 !qm->err_status.is_qm_ecc_mbit && in qm_dev_ecc_mbit_handle()
3465 !qm->err_ini->close_axi_master_ooo) { in qm_dev_ecc_mbit_handle()
3467 nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); in qm_dev_ecc_mbit_handle()
3469 qm->io_base + QM_RAS_NFE_ENABLE); in qm_dev_ecc_mbit_handle()
3470 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); in qm_dev_ecc_mbit_handle()
3476 struct pci_dev *pdev = qm->pdev; in qm_soft_reset()
3485 if (qm->vfs_num) { in qm_soft_reset()
3503 qm->io_base + ACC_MASTER_GLOBAL_CTRL); in qm_soft_reset()
3505 /* If bus lock, reset chip */ in qm_soft_reset()
3506 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, in qm_soft_reset()
3511 pci_emerg(pdev, "Bus lock! Please reset system.\n"); in qm_soft_reset()
3521 /* The reset related sub-control registers are not in PCI BAR */ in qm_soft_reset()
3522 if (ACPI_HANDLE(&pdev->dev)) { in qm_soft_reset()
3526 s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), in qm_soft_reset()
3527 qm->err_ini->err_info.acpi_rst, in qm_soft_reset()
3531 return -EIO; in qm_soft_reset()
3536 return -EIO; in qm_soft_reset()
3540 return -EINVAL; in qm_soft_reset()
3548 struct hisi_qm_list *qm_list = qm->qm_list; in qm_vf_reset_done()
3549 struct pci_dev *pdev = qm->pdev; in qm_vf_reset_done()
3554 mutex_lock(&qm_list->lock); in qm_vf_reset_done()
3555 list_for_each_entry(vf_qm, &qm_list->list, list) { in qm_vf_reset_done()
3556 virtfn = vf_qm->pdev; in qm_vf_reset_done()
3571 mutex_unlock(&qm_list->lock); in qm_vf_reset_done()
3577 return qm->err_ini->get_dev_hw_err_status(qm); in qm_get_dev_err_status()
3582 return qm->err_ini->hw_init(qm); in qm_dev_hw_init()
3589 if (!qm->err_status.is_qm_ecc_mbit && in qm_restart_prepare()
3590 !qm->err_status.is_dev_ecc_mbit) in qm_restart_prepare()
3594 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_prepare()
3595 writel(value & ~qm->err_ini->err_info.msi_wr_port, in qm_restart_prepare()
3596 qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_prepare()
3600 qm->err_ini->err_info.ecc_2bits_mask; in qm_restart_prepare()
3601 if (value && qm->err_ini->clear_dev_hw_err_status) in qm_restart_prepare()
3602 qm->err_ini->clear_dev_hw_err_status(qm, value); in qm_restart_prepare()
3605 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE); in qm_restart_prepare()
3608 writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS); in qm_restart_prepare()
3610 if (qm->err_ini->open_axi_master_ooo) in qm_restart_prepare()
3611 qm->err_ini->open_axi_master_ooo(qm); in qm_restart_prepare()
3618 if (!qm->err_status.is_qm_ecc_mbit && in qm_restart_done()
3619 !qm->err_status.is_dev_ecc_mbit) in qm_restart_done()
3623 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_done()
3624 value |= qm->err_ini->err_info.msi_wr_port; in qm_restart_done()
3625 writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_done()
3627 qm->err_status.is_qm_ecc_mbit = false; in qm_restart_done()
3628 qm->err_status.is_dev_ecc_mbit = false; in qm_restart_done()
3633 struct pci_dev *pdev = qm->pdev; in qm_controller_reset_done()
3648 if (qm->vfs_num) { in qm_controller_reset_done()
3670 if (qm->vfs_num) { in qm_controller_reset_done()
3671 ret = qm_vf_q_assign(qm, qm->vfs_num); in qm_controller_reset_done()
3681 return -EPERM; in qm_controller_reset_done()
3687 clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag); in qm_controller_reset_done()
3694 struct pci_dev *pdev = qm->pdev; in qm_controller_reset()
3719 * hisi_qm_dev_slot_reset() - slot reset
3730 if (pdev->is_virtfn) in hisi_qm_dev_slot_reset()
3746 /* check whether the interrupt is an ecc-mbit error */
3751 if (qm->fun_type == QM_HW_VF) in qm_check_dev_error()
3759 qm->err_ini->err_info.ecc_2bits_mask); in qm_check_dev_error()
3787 if (qm->vfs_num) { in hisi_qm_reset_prepare()
3812 pci_read_config_dword(qm->pdev, PCI_COMMAND, &id); in qm_flr_reset_complete()
3818 clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag); in qm_flr_reset_complete()
3837 if (qm->fun_type == QM_HW_PF) { in hisi_qm_reset_done()
3844 if (!qm->vfs_num) in hisi_qm_reset_done()
3847 ret = qm_vf_q_assign(qm, qm->vfs_num); in hisi_qm_reset_done()
3871 atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt); in qm_abnormal_irq()
3874 schedule_work(&qm->rst_work); in qm_abnormal_irq()
3881 struct pci_dev *pdev = qm->pdev; in qm_irq_register()
3885 qm_irq, IRQF_SHARED, qm->dev_name, qm); in qm_irq_register()
3889 if (qm->ver != QM_HW_V1) { in qm_irq_register()
3891 qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm); in qm_irq_register()
3895 if (qm->fun_type == QM_HW_PF) { in qm_irq_register()
3899 qm->dev_name, qm); in qm_irq_register()
3915 * hisi_qm_dev_shutdown() - Shutdown device.
3927 dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n"); in hisi_qm_dev_shutdown()
3939 dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret); in hisi_qm_controller_reset()
3944 * hisi_qm_alg_register() - Register alg to crypto and add qm to qm_list.
3956 mutex_lock(&qm_list->lock); in hisi_qm_alg_register()
3957 if (list_empty(&qm_list->list)) in hisi_qm_alg_register()
3959 list_add_tail(&qm->list, &qm_list->list); in hisi_qm_alg_register()
3960 mutex_unlock(&qm_list->lock); in hisi_qm_alg_register()
3963 ret = qm_list->register_to_crypto(); in hisi_qm_alg_register()
3965 mutex_lock(&qm_list->lock); in hisi_qm_alg_register()
3966 list_del(&qm->list); in hisi_qm_alg_register()
3967 mutex_unlock(&qm_list->lock); in hisi_qm_alg_register()
3976 * hisi_qm_alg_unregister() - Unregister alg from crypto and delete qm from
3986 mutex_lock(&qm_list->lock); in hisi_qm_alg_unregister()
3987 list_del(&qm->list); in hisi_qm_alg_unregister()
3988 mutex_unlock(&qm_list->lock); in hisi_qm_alg_unregister()
3990 if (list_empty(&qm_list->list)) in hisi_qm_alg_unregister()
3991 qm_list->unregister_from_crypto(); in hisi_qm_alg_unregister()
3996 * hisi_qm_init() - Initialize configures about qm.
4003 struct pci_dev *pdev = qm->pdev; in hisi_qm_init()
4004 struct device *dev = &pdev->dev; in hisi_qm_init()
4012 dev_warn(&pdev->dev, "fail to alloc uacce (%d)\n", ret); in hisi_qm_init()
4016 dev_err(&pdev->dev, "Failed to enable device mem!\n"); in hisi_qm_init()
4020 ret = pci_request_mem_regions(pdev, qm->dev_name); in hisi_qm_init()
4022 dev_err(&pdev->dev, "Failed to request mem regions!\n"); in hisi_qm_init()
4026 qm->phys_base = pci_resource_start(pdev, PCI_BAR_2); in hisi_qm_init()
4027 qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2); in hisi_qm_init()
4028 qm->io_base = ioremap(qm->phys_base, qm->phys_size); in hisi_qm_init()
4029 if (!qm->io_base) { in hisi_qm_init()
4030 ret = -EIO; in hisi_qm_init()
4039 if (!qm->ops->get_irq_num) { in hisi_qm_init()
4040 ret = -EOPNOTSUPP; in hisi_qm_init()
4043 num_vec = qm->ops->get_irq_num(qm); in hisi_qm_init()
4054 if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) { in hisi_qm_init()
4056 ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); in hisi_qm_init()
4065 INIT_WORK(&qm->work, qm_work_process); in hisi_qm_init()
4066 if (qm->fun_type == QM_HW_PF) in hisi_qm_init()
4067 INIT_WORK(&qm->rst_work, hisi_qm_controller_reset); in hisi_qm_init()
4069 atomic_set(&qm->status.flags, QM_INIT); in hisi_qm_init()
4078 iounmap(qm->io_base); in hisi_qm_init()
4084 uacce_remove(qm->uacce); in hisi_qm_init()
4085 qm->uacce = NULL; in hisi_qm_init()