1 // SPDX-License-Identifier: GPL-2.0
6 #include <linux/dma-mapping.h>
49 #define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1)
59 #define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1)
61 #define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc)->w11) >> 6) & 0x1)
67 #define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
70 #define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
240 ((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
254 ((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
257 (qc)->head = 0; \
258 (qc)->tail = 0; \
259 (qc)->base_l = cpu_to_le32(lower_32_bits(base)); \
260 (qc)->base_h = cpu_to_le32(upper_32_bits(base)); \
261 (qc)->dw3 = 0; \
262 (qc)->w8 = 0; \
263 (qc)->rsvd0 = 0; \
264 (qc)->pasid = cpu_to_le16(pasid); \
265 (qc)->w11 = 0; \
266 (qc)->rsvd1 = 0; \
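A worked instance of the dw3 packing done by the helpers at lines 240 and 254 above (presumably the V2 CQC/SQC dw3 builders consumed by QM_MK_SQC_DW3_V2 and QM_MK_CQC_DW3_V2 at lines 1995 and 2038 below); the depth and SQE size values are purely illustrative:

	/* Illustrative values: sq_depth = 1024, sqe_sz = 128 bytes.       */
	u32 dw3 = (1024U - 1) | ((u32)ilog2(128) << QM_SQ_SQE_SIZE_SHIFT);
	/* Low bits carry depth - 1 (1023); the size field carries 7,      */
	/* i.e. log2 of the SQE size.                                      */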
376 * struct qm_hw_err - Structure describing the device errors
461 enum qm_state curr = atomic_read(&qm->status.flags); in qm_avail_state()
481 dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n", in qm_avail_state()
485 dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n", in qm_avail_state()
494 enum qm_state qm_curr = atomic_read(&qm->status.flags); in qm_qp_avail_state()
499 qp_curr = atomic_read(&qp->qp_status.flags); in qm_qp_avail_state()
527 dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n", in qm_qp_avail_state()
531 dev_warn(&qm->pdev->dev, in qm_qp_avail_state()
540 return readl(qm->io_base + QM_ABNORMAL_INT_STATUS); in qm_get_hw_error_status()
545 return qm->err_ini->get_dev_hw_err_status(qm); in qm_get_dev_err_status()
553 if (qm->fun_type == QM_HW_VF) in qm_check_dev_error()
556 val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask; in qm_check_dev_error()
557 dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask; in qm_check_dev_error()
567 while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { in qm_wait_reset_finish()
570 return -EBUSY; in qm_wait_reset_finish()
578 struct pci_dev *pdev = qm->pdev; in qm_reset_prepare_ready()
585 if (qm->ver < QM_HW_V3) in qm_reset_prepare_ready()
593 struct pci_dev *pdev = qm->pdev; in qm_reset_bit_clear()
596 if (qm->ver < QM_HW_V3) in qm_reset_bit_clear()
597 clear_bit(QM_RESETTING, &pf_qm->misc_ctl); in qm_reset_bit_clear()
599 clear_bit(QM_RESETTING, &qm->misc_ctl); in qm_reset_bit_clear()
605 mailbox->w0 = cpu_to_le16((cmd) | in qm_mb_pre_init()
608 mailbox->queue_num = cpu_to_le16(queue); in qm_mb_pre_init()
609 mailbox->base_l = cpu_to_le32(lower_32_bits(base)); in qm_mb_pre_init()
610 mailbox->base_h = cpu_to_le32(upper_32_bits(base)); in qm_mb_pre_init()
611 mailbox->rsvd = 0; in qm_mb_pre_init()
614 /* Return 0 when the mailbox is ready, or -ETIMEDOUT on hardware timeout */
619 return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE, in hisi_qm_wait_mb_ready()
628 void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE; in qm_mb_write()
658 dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n"); in qm_mb_nolock()
659 ret = -EBUSY; in qm_mb_nolock()
666 dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n"); in qm_mb_nolock()
667 ret = -ETIMEDOUT; in qm_mb_nolock()
671 val = readl(qm->io_base + QM_MB_CMD_SEND_BASE); in qm_mb_nolock()
673 dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n"); in qm_mb_nolock()
674 ret = -EIO; in qm_mb_nolock()
681 atomic64_inc(&qm->debug.dfx.mb_err_cnt); in qm_mb_nolock()
691 dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n", in hisi_qm_mb()
696 mutex_lock(&qm->mailbox_lock); in hisi_qm_mb()
698 mutex_unlock(&qm->mailbox_lock); in hisi_qm_mb()
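A minimal caller-side sketch of the mailbox path above; the command and argument order mirror the QM_MB_CMD_SQC_BT call at line 3133 further down, and the error handling is illustrative only:

	/* Tell the hardware where the SQC base table lives (write op). */
	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to set SQC base table\n");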
712 writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1); in qm_db_v1()
717 void __iomem *io_base = qm->io_base; in qm_db_v2()
722 io_base = qm->db_io_base + (u64)qn * qm->db_interval + in qm_db_v2()
737 dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n", in qm_db()
740 qm->ops->qm_db(qm, qn, cmd, index, priority); in qm_db()
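On V2 and later hardware, SQ/CQ doorbells are issued at db_io_base + qn * db_interval (line 722 above), so each queue gets its own doorbell window; a small offset check with an illustrative interval:

	/* Illustrative: qn = 3, qm->db_interval = 0x1000.                 */
	/* The doorbell write for queue 3 targets offset 0x3000 from       */
	/* qm->db_io_base; the same per-queue stride is what the mmap path */
	/* at lines 2379-2380 below exposes to user space.                 */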
748 if (qm->ver < QM_HW_V3) in qm_disable_clock_gate()
751 val = readl(qm->io_base + QM_PM_CTRL); in qm_disable_clock_gate()
753 writel(val, qm->io_base + QM_PM_CTRL); in qm_disable_clock_gate()
760 writel(0x1, qm->io_base + QM_MEM_START_INIT); in qm_dev_mem_reset()
761 return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val, in qm_dev_mem_reset()
767 * hisi_qm_get_hw_info() - Get device information.
781 switch (qm->ver) { in hisi_qm_get_hw_info()
790 val = readl(qm->io_base + info_table[index].offset); in hisi_qm_get_hw_info()
801 depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver); in qm_get_xqc_depth()
809 struct device *dev = &qm->pdev->dev; in hisi_qm_set_algs()
813 if (!qm->uacce) in hisi_qm_set_algs()
819 return -EINVAL; in hisi_qm_set_algs()
824 return -ENOMEM; in hisi_qm_set_algs()
833 qm->uacce->algs = algs; in hisi_qm_set_algs()
842 if (qm->fun_type == QM_HW_PF) in qm_get_irq_num()
843 return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver); in qm_get_irq_num()
845 return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver); in qm_get_irq_num()
850 struct device *dev = &qm->pdev->dev; in qm_pm_get_sync()
853 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) in qm_pm_get_sync()
867 struct device *dev = &qm->pdev->dev; in qm_pm_put_sync()
869 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) in qm_pm_put_sync()
878 if (qp->qp_status.cq_head == qp->cq_depth - 1) { in qm_cq_head_update()
879 qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase; in qm_cq_head_update()
880 qp->qp_status.cq_head = 0; in qm_cq_head_update()
882 qp->qp_status.cq_head++; in qm_cq_head_update()
888 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; in qm_poll_req_cb()
889 struct hisi_qm *qm = qp->qm; in qm_poll_req_cb()
891 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { in qm_poll_req_cb()
893 qp->req_cb(qp, qp->sqe + qm->sqe_size * in qm_poll_req_cb()
894 le16_to_cpu(cqe->sq_head)); in qm_poll_req_cb()
896 cqe = qp->cqe + qp->qp_status.cq_head; in qm_poll_req_cb()
897 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, in qm_poll_req_cb()
898 qp->qp_status.cq_head, 0); in qm_poll_req_cb()
899 atomic_dec(&qp->qp_status.used); in qm_poll_req_cb()
905 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1); in qm_poll_req_cb()
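The loop above is the standard phase-bit ring consumer: a CQE is valid only while its phase bit matches the software-side cqc_phase, and qm_cq_head_update() (lines 878-882) flips cqc_phase whenever cq_head wraps. A condensed sketch of that pattern, using only names shown in this listing:

	struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;

	while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
		/* ... hand cqe->sq_head to the request callback ... */
		qm_cq_head_update(qp);	/* may toggle cqc_phase on wrap */
		cqe = qp->cqe + qp->qp_status.cq_head;
	}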
912 struct hisi_qm *qm = poll_data->qm; in qm_work_process()
913 u16 eqe_num = poll_data->eqe_num; in qm_work_process()
917 for (i = eqe_num - 1; i >= 0; i--) { in qm_work_process()
918 qp = &qm->qp_array[poll_data->qp_finish_id[i]]; in qm_work_process()
919 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP)) in qm_work_process()
922 if (qp->event_cb) { in qm_work_process()
923 qp->event_cb(qp); in qm_work_process()
927 if (likely(qp->req_cb)) in qm_work_process()
934 struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; in qm_get_complete_eqe_num()
936 u16 eq_depth = qm->eq_depth; in qm_get_complete_eqe_num()
939 if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) { in qm_get_complete_eqe_num()
940 atomic64_inc(&qm->debug.dfx.err_irq_cnt); in qm_get_complete_eqe_num()
941 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_get_complete_eqe_num()
945 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; in qm_get_complete_eqe_num()
946 if (unlikely(cqn >= qm->qp_num)) in qm_get_complete_eqe_num()
948 poll_data = &qm->poll_data[cqn]; in qm_get_complete_eqe_num()
950 while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { in qm_get_complete_eqe_num()
951 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; in qm_get_complete_eqe_num()
952 poll_data->qp_finish_id[eqe_num] = cqn; in qm_get_complete_eqe_num()
955 if (qm->status.eq_head == eq_depth - 1) { in qm_get_complete_eqe_num()
956 qm->status.eqc_phase = !qm->status.eqc_phase; in qm_get_complete_eqe_num()
957 eqe = qm->eqe; in qm_get_complete_eqe_num()
958 qm->status.eq_head = 0; in qm_get_complete_eqe_num()
961 qm->status.eq_head++; in qm_get_complete_eqe_num()
964 if (eqe_num == (eq_depth >> 1) - 1) in qm_get_complete_eqe_num()
968 poll_data->eqe_num = eqe_num; in qm_get_complete_eqe_num()
969 queue_work(qm->wq, &poll_data->work); in qm_get_complete_eqe_num()
970 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_get_complete_eqe_num()
977 /* Get qp id of completed tasks and re-enable the interrupt */ in qm_eq_irq()
988 val = readl(qm->io_base + QM_IFC_INT_STATUS); in qm_mb_cmd_irq()
993 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) { in qm_mb_cmd_irq()
994 dev_warn(&qm->pdev->dev, "Driver is down, message cannot be processed!\n"); in qm_mb_cmd_irq()
998 schedule_work(&qm->cmd_process); in qm_mb_cmd_irq()
1007 if (qp->is_in_kernel) in qm_set_qp_disable()
1010 addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset; in qm_set_qp_disable()
1019 struct hisi_qp *qp = &qm->qp_array[qp_id]; in qm_disable_qp()
1028 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); in qm_reset_function()
1029 struct device *dev = &qm->pdev->dev; in qm_reset_function()
1058 struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head; in qm_aeq_thread()
1059 u16 aeq_depth = qm->aeq_depth; in qm_aeq_thread()
1062 atomic64_inc(&qm->debug.dfx.aeq_irq_cnt); in qm_aeq_thread()
1064 while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) { in qm_aeq_thread()
1065 type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT; in qm_aeq_thread()
1066 qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK; in qm_aeq_thread()
1070 dev_err(&qm->pdev->dev, "eq overflow, reset function\n"); in qm_aeq_thread()
1074 dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n", in qm_aeq_thread()
1081 dev_err(&qm->pdev->dev, "unknown error type %u\n", in qm_aeq_thread()
1086 if (qm->status.aeq_head == aeq_depth - 1) { in qm_aeq_thread()
1087 qm->status.aeqc_phase = !qm->status.aeqc_phase; in qm_aeq_thread()
1088 aeqe = qm->aeqe; in qm_aeq_thread()
1089 qm->status.aeq_head = 0; in qm_aeq_thread()
1092 qm->status.aeq_head++; in qm_aeq_thread()
1096 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); in qm_aeq_thread()
1103 struct hisi_qp_status *qp_status = &qp->qp_status; in qm_init_qp_status()
1105 qp_status->sq_tail = 0; in qm_init_qp_status()
1106 qp_status->cq_head = 0; in qm_init_qp_status()
1107 qp_status->cqc_phase = true; in qm_init_qp_status()
1108 atomic_set(&qp_status->used, 0); in qm_init_qp_status()
1113 struct device *dev = &qm->pdev->dev; in qm_init_prefetch()
1116 if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) in qm_init_prefetch()
1134 writel(page_type, qm->io_base + QM_PAGE_SIZE); in qm_init_prefetch()
1144 * IR(Mbps) = -------------------------
1184 factor->cbs_s = acc_shaper_calc_cbs_s(ir); in qm_get_shaper_para()
1191 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir; in qm_get_shaper_para()
1193 factor->cir_b = cir_b; in qm_get_shaper_para()
1194 factor->cir_u = cir_u; in qm_get_shaper_para()
1195 factor->cir_s = cir_s; in qm_get_shaper_para()
1201 return -EINVAL; in qm_get_shaper_para()
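A worked instance of the acceptance test at line 1191 above; the requested and computed rates are illustrative, and QM_QOS_EXPAND_RATE is assumed to be a per-mille scaling factor:

	/* Illustrative: requested ir = 10000, candidate ir_calc = 10050. */
	error_rate = QM_QOS_EXPAND_RATE * (u32)abs(10050 - 10000) / 10000;
	/* With a per-mille scale this evaluates to 5 (0.5% deviation);   */
	/* only an acceptable deviation lets the candidate cir_b/cir_u/   */
	/* cir_s values be committed (lines 1193-1195).                   */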
1212 if (qm->ver == QM_HW_V1) { in qm_vft_data_cfg()
1221 (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT; in qm_vft_data_cfg()
1225 if (qm->ver == QM_HW_V1) { in qm_vft_data_cfg()
1236 tmp = factor->cir_b | in qm_vft_data_cfg()
1237 (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) | in qm_vft_data_cfg()
1238 (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) | in qm_vft_data_cfg()
1240 (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT); in qm_vft_data_cfg()
1246 writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L); in qm_vft_data_cfg()
1247 writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H); in qm_vft_data_cfg()
1257 if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) in qm_set_vft_common()
1258 factor = &qm->factor[fun_num]; in qm_set_vft_common()
1260 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_set_vft_common()
1266 writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR); in qm_set_vft_common()
1267 writel(type, qm->io_base + QM_VFT_CFG_TYPE); in qm_set_vft_common()
1271 writel(fun_num, qm->io_base + QM_VFT_CFG); in qm_set_vft_common()
1275 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); in qm_set_vft_common()
1276 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); in qm_set_vft_common()
1278 return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_set_vft_common()
1285 u32 qos = qm->factor[fun_num].func_qos; in qm_shaper_init_vft()
1288 ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]); in qm_shaper_init_vft()
1290 dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n"); in qm_shaper_init_vft()
1293 writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG); in qm_shaper_init_vft()
1317 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { in qm_set_sqc_cqc_vft()
1340 sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | in qm_get_vft_v2()
1341 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); in qm_get_vft_v2()
1352 struct device *dev = &qm->pdev->dev; in hisi_qm_ctx_alloc()
1357 return ERR_PTR(-ENOMEM); in hisi_qm_ctx_alloc()
1363 return ERR_PTR(-ENOMEM); in hisi_qm_ctx_alloc()
1372 struct device *dev = &qm->pdev->dev; in hisi_qm_ctx_free()
1390 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v1()
1395 struct hisi_qm_err_info *err_info = &qm->err_info; in qm_hw_error_cfg()
1397 qm->error_mask = err_info->nfe | err_info->ce | err_info->fe; in qm_hw_error_cfg()
1399 writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE); in qm_hw_error_cfg()
1402 writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE); in qm_hw_error_cfg()
1403 writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD); in qm_hw_error_cfg()
1404 writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE); in qm_hw_error_cfg()
1405 writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE); in qm_hw_error_cfg()
1414 irq_unmask = ~qm->error_mask; in qm_hw_error_init_v2()
1415 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v2()
1416 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v2()
1421 u32 irq_mask = qm->error_mask; in qm_hw_error_uninit_v2()
1423 irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v2()
1424 writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v2()
1434 writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL); in qm_hw_error_init_v3()
1436 irq_unmask = ~qm->error_mask; in qm_hw_error_init_v3()
1437 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v3()
1438 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v3()
1443 u32 irq_mask = qm->error_mask; in qm_hw_error_uninit_v3()
1445 irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v3()
1446 writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v3()
1449 writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL); in qm_hw_error_uninit_v3()
1455 struct device *dev = &qm->pdev->dev; in qm_log_hw_error()
1461 if (!(err->int_msk & error_status)) in qm_log_hw_error()
1465 err->msg, err->int_msk); in qm_log_hw_error()
1467 if (err->int_msk & QM_DB_TIMEOUT) { in qm_log_hw_error()
1468 reg_val = readl(qm->io_base + QM_ABNORMAL_INF01); in qm_log_hw_error()
1474 } else if (err->int_msk & QM_OF_FIFO_OF) { in qm_log_hw_error()
1475 reg_val = readl(qm->io_base + QM_ABNORMAL_INF00); in qm_log_hw_error()
1494 tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); in qm_hw_error_handle_v2()
1495 error_status = qm->error_mask & tmp; in qm_hw_error_handle_v2()
1499 qm->err_status.is_qm_ecc_mbit = true; in qm_hw_error_handle_v2()
1502 if (error_status & qm->err_info.qm_reset_mask) in qm_hw_error_handle_v2()
1505 writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); in qm_hw_error_handle_v2()
1506 writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE); in qm_hw_error_handle_v2()
1518 mutex_lock(&qm->mailbox_lock); in qm_get_mb_cmd()
1523 *msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | in qm_get_mb_cmd()
1524 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); in qm_get_mb_cmd()
1527 mutex_unlock(&qm->mailbox_lock); in qm_get_mb_cmd()
1535 if (qm->fun_type == QM_HW_PF) in qm_clear_cmd_interrupt()
1536 writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P); in qm_clear_cmd_interrupt()
1538 val = readl(qm->io_base + QM_IFC_INT_SOURCE_V); in qm_clear_cmd_interrupt()
1540 writel(val, qm->io_base + QM_IFC_INT_SOURCE_V); in qm_clear_cmd_interrupt()
1545 struct device *dev = &qm->pdev->dev; in qm_handle_vf_msg()
1575 struct device *dev = &qm->pdev->dev; in qm_wait_vf_prepare_finish()
1576 u32 vfs_num = qm->vfs_num; in qm_wait_vf_prepare_finish()
1582 if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in qm_wait_vf_prepare_finish()
1586 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P); in qm_wait_vf_prepare_finish()
1592 ret = -EBUSY; in qm_wait_vf_prepare_finish()
1617 val = readl(qm->io_base + QM_IFC_INT_CFG); in qm_trigger_vf_interrupt()
1620 writel(val, qm->io_base + QM_IFC_INT_CFG); in qm_trigger_vf_interrupt()
1622 val = readl(qm->io_base + QM_IFC_INT_SET_P); in qm_trigger_vf_interrupt()
1624 writel(val, qm->io_base + QM_IFC_INT_SET_P); in qm_trigger_vf_interrupt()
1631 val = readl(qm->io_base + QM_IFC_INT_SET_V); in qm_trigger_pf_interrupt()
1633 writel(val, qm->io_base + QM_IFC_INT_SET_V); in qm_trigger_pf_interrupt()
1638 struct device *dev = &qm->pdev->dev; in qm_ping_single_vf()
1645 mutex_lock(&qm->mailbox_lock); in qm_ping_single_vf()
1655 val = readq(qm->io_base + QM_IFC_READY_STATUS); in qm_ping_single_vf()
1662 ret = -ETIMEDOUT; in qm_ping_single_vf()
1668 mutex_unlock(&qm->mailbox_lock); in qm_ping_single_vf()
1674 struct device *dev = &qm->pdev->dev; in qm_ping_all_vfs()
1675 u32 vfs_num = qm->vfs_num; in qm_ping_all_vfs()
1683 mutex_lock(&qm->mailbox_lock); in qm_ping_all_vfs()
1688 mutex_unlock(&qm->mailbox_lock); in qm_ping_all_vfs()
1695 val = readq(qm->io_base + QM_IFC_READY_STATUS); in qm_ping_all_vfs()
1698 mutex_unlock(&qm->mailbox_lock); in qm_ping_all_vfs()
1706 mutex_unlock(&qm->mailbox_lock); in qm_ping_all_vfs()
1714 return -ETIMEDOUT; in qm_ping_all_vfs()
1725 mutex_lock(&qm->mailbox_lock); in qm_ping_pf()
1728 dev_err(&qm->pdev->dev, "failed to send command to PF!\n"); in qm_ping_pf()
1736 val = readl(qm->io_base + QM_IFC_INT_SET_V); in qm_ping_pf()
1741 ret = -ETIMEDOUT; in qm_ping_pf()
1747 mutex_unlock(&qm->mailbox_lock); in qm_ping_pf()
1753 return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0); in qm_stop_qp()
1758 struct pci_dev *pdev = qm->pdev; in qm_set_msi()
1761 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, in qm_set_msi()
1764 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, in qm_set_msi()
1766 if (qm->err_status.is_qm_ecc_mbit || in qm_set_msi()
1767 qm->err_status.is_dev_ecc_mbit) in qm_set_msi()
1771 if (readl(qm->io_base + QM_PEH_DFX_INFO0)) in qm_set_msi()
1772 return -EFAULT; in qm_set_msi()
1780 struct pci_dev *pdev = qm->pdev; in qm_wait_msi_finish()
1787 pci_read_config_dword(pdev, pdev->msi_cap + in qm_wait_msi_finish()
1800 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0, in qm_wait_msi_finish()
1806 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1, in qm_wait_msi_finish()
1815 struct pci_dev *pdev = qm->pdev; in qm_set_msi_v3()
1816 int ret = -ETIMEDOUT; in qm_set_msi_v3()
1819 pci_read_config_dword(pdev, pdev->msi_cap, &cmd); in qm_set_msi_v3()
1825 pci_write_config_dword(pdev, pdev->msi_cap, cmd); in qm_set_msi_v3()
1828 pci_read_config_dword(pdev, pdev->msi_cap, &cmd); in qm_set_msi_v3()
1869 struct hisi_qp_status *qp_status = &qp->qp_status; in qm_get_avail_sqe()
1870 u16 sq_tail = qp_status->sq_tail; in qm_get_avail_sqe()
1872 if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1)) in qm_get_avail_sqe()
1875 return qp->sqe + sq_tail * qp->qm->sqe_size; in qm_get_avail_sqe()
1883 addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET; in hisi_qm_unset_hw_reset()
1889 struct device *dev = &qm->pdev->dev; in qm_create_qp_nolock()
1894 return ERR_PTR(-EPERM); in qm_create_qp_nolock()
1896 if (qm->qp_in_used == qm->qp_num) { in qm_create_qp_nolock()
1898 qm->qp_num); in qm_create_qp_nolock()
1899 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); in qm_create_qp_nolock()
1900 return ERR_PTR(-EBUSY); in qm_create_qp_nolock()
1903 qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC); in qm_create_qp_nolock()
1906 qm->qp_num); in qm_create_qp_nolock()
1907 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); in qm_create_qp_nolock()
1908 return ERR_PTR(-EBUSY); in qm_create_qp_nolock()
1911 qp = &qm->qp_array[qp_id]; in qm_create_qp_nolock()
1913 memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth); in qm_create_qp_nolock()
1915 qp->event_cb = NULL; in qm_create_qp_nolock()
1916 qp->req_cb = NULL; in qm_create_qp_nolock()
1917 qp->qp_id = qp_id; in qm_create_qp_nolock()
1918 qp->alg_type = alg_type; in qm_create_qp_nolock()
1919 qp->is_in_kernel = true; in qm_create_qp_nolock()
1920 qm->qp_in_used++; in qm_create_qp_nolock()
1921 atomic_set(&qp->qp_status.flags, QP_INIT); in qm_create_qp_nolock()
1927 * hisi_qm_create_qp() - Create a queue pair from qm.
1942 down_write(&qm->qps_lock); in hisi_qm_create_qp()
1944 up_write(&qm->qps_lock); in hisi_qm_create_qp()
1953 * hisi_qm_release_qp() - Release a qp back to its qm.
1960 struct hisi_qm *qm = qp->qm; in hisi_qm_release_qp()
1962 down_write(&qm->qps_lock); in hisi_qm_release_qp()
1965 up_write(&qm->qps_lock); in hisi_qm_release_qp()
1969 qm->qp_in_used--; in hisi_qm_release_qp()
1970 idr_remove(&qm->qp_idr, qp->qp_id); in hisi_qm_release_qp()
1972 up_write(&qm->qps_lock); in hisi_qm_release_qp()
1979 struct hisi_qm *qm = qp->qm; in qm_sq_ctx_cfg()
1980 struct device *dev = &qm->pdev->dev; in qm_sq_ctx_cfg()
1981 enum qm_hw_ver ver = qm->ver; in qm_sq_ctx_cfg()
1988 return -ENOMEM; in qm_sq_ctx_cfg()
1990 INIT_QC_COMMON(sqc, qp->sqe_dma, pasid); in qm_sq_ctx_cfg()
1992 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size)); in qm_sq_ctx_cfg()
1993 sqc->w8 = cpu_to_le16(qp->sq_depth - 1); in qm_sq_ctx_cfg()
1995 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth)); in qm_sq_ctx_cfg()
1996 sqc->w8 = 0; /* rand_qc */ in qm_sq_ctx_cfg()
1998 sqc->cq_num = cpu_to_le16(qp_id); in qm_sq_ctx_cfg()
1999 sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type)); in qm_sq_ctx_cfg()
2001 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) in qm_sq_ctx_cfg()
2002 sqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE << in qm_sq_ctx_cfg()
2009 return -ENOMEM; in qm_sq_ctx_cfg()
2021 struct hisi_qm *qm = qp->qm; in qm_cq_ctx_cfg()
2022 struct device *dev = &qm->pdev->dev; in qm_cq_ctx_cfg()
2023 enum qm_hw_ver ver = qm->ver; in qm_cq_ctx_cfg()
2030 return -ENOMEM; in qm_cq_ctx_cfg()
2032 INIT_QC_COMMON(cqc, qp->cqe_dma, pasid); in qm_cq_ctx_cfg()
2034 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, in qm_cq_ctx_cfg()
2036 cqc->w8 = cpu_to_le16(qp->cq_depth - 1); in qm_cq_ctx_cfg()
2038 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth)); in qm_cq_ctx_cfg()
2039 cqc->w8 = 0; /* rand_qc */ in qm_cq_ctx_cfg()
2041 cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT); in qm_cq_ctx_cfg()
2043 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) in qm_cq_ctx_cfg()
2044 cqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE); in qm_cq_ctx_cfg()
2050 return -ENOMEM; in qm_cq_ctx_cfg()
2075 struct hisi_qm *qm = qp->qm; in qm_start_qp_nolock()
2076 struct device *dev = &qm->pdev->dev; in qm_start_qp_nolock()
2077 int qp_id = qp->qp_id; in qm_start_qp_nolock()
2082 return -EPERM; in qm_start_qp_nolock()
2088 atomic_set(&qp->qp_status.flags, QP_START); in qm_start_qp_nolock()
2095 * hisi_qm_start_qp() - Start a qp into running.
2104 struct hisi_qm *qm = qp->qm; in hisi_qm_start_qp()
2107 down_write(&qm->qps_lock); in hisi_qm_start_qp()
2109 up_write(&qm->qps_lock); in hisi_qm_start_qp()
2116 * qp_stop_fail_cb() - Call the request callback.
2123 int qp_used = atomic_read(&qp->qp_status.used); in qp_stop_fail_cb()
2124 u16 cur_tail = qp->qp_status.sq_tail; in qp_stop_fail_cb()
2125 u16 sq_depth = qp->sq_depth; in qp_stop_fail_cb()
2126 u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth; in qp_stop_fail_cb()
2127 struct hisi_qm *qm = qp->qm; in qp_stop_fail_cb()
2133 qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos)); in qp_stop_fail_cb()
2134 atomic_dec(&qp->qp_status.used); in qp_stop_fail_cb()
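A quick numeric check of the head recovery at line 2126 above (values illustrative):

	/* sq_depth = 1024, sq_tail = 3, qp_used = 5 outstanding SQEs:   */
	/* cur_head = (3 + 1024 - 5) % 1024 = 1022,                      */
	/* i.e. the oldest un-completed SQE sits just before the wrap,   */
	/* and the replay walks slots 1022, 1023, 0, 1, 2 in order.      */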
2139 * qm_drain_qp() - Drain a qp.
2148 struct hisi_qm *qm = qp->qm; in qm_drain_qp()
2149 struct device *dev = &qm->pdev->dev; in qm_drain_qp()
2161 if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) { in qm_drain_qp()
2164 dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id); in qm_drain_qp()
2171 return -ENOMEM; in qm_drain_qp()
2175 ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id); in qm_drain_qp()
2183 qp->qp_id); in qm_drain_qp()
2190 if ((sqc->tail == cqc->tail) && in qm_drain_qp()
2195 dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id); in qm_drain_qp()
2196 ret = -EBUSY; in qm_drain_qp()
2210 struct device *dev = &qp->qm->pdev->dev; in qm_stop_qp_nolock()
2219 if (atomic_read(&qp->qp_status.flags) == QP_STOP) { in qm_stop_qp_nolock()
2220 qp->is_resetting = false; in qm_stop_qp_nolock()
2224 if (!qm_qp_avail_state(qp->qm, qp, QP_STOP)) in qm_stop_qp_nolock()
2225 return -EPERM; in qm_stop_qp_nolock()
2227 atomic_set(&qp->qp_status.flags, QP_STOP); in qm_stop_qp_nolock()
2234 flush_workqueue(qp->qm->wq); in qm_stop_qp_nolock()
2235 if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used))) in qm_stop_qp_nolock()
2238 dev_dbg(dev, "stop queue %u!", qp->qp_id); in qm_stop_qp_nolock()
2244 * hisi_qm_stop_qp() - Stop a qp in qm.
2253 down_write(&qp->qm->qps_lock); in hisi_qm_stop_qp()
2255 up_write(&qp->qm->qps_lock); in hisi_qm_stop_qp()
2262 * hisi_qp_send() - Queue up a task in the hardware queue.
2266 * This function will return -EBUSY if qp is currently full, and -EAGAIN
2278 struct hisi_qp_status *qp_status = &qp->qp_status; in hisi_qp_send()
2279 u16 sq_tail = qp_status->sq_tail; in hisi_qp_send()
2280 u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth; in hisi_qp_send()
2283 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP || in hisi_qp_send()
2284 atomic_read(&qp->qm->status.flags) == QM_STOP || in hisi_qp_send()
2285 qp->is_resetting)) { in hisi_qp_send()
2286 dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n"); in hisi_qp_send()
2287 return -EAGAIN; in hisi_qp_send()
2291 return -EBUSY; in hisi_qp_send()
2293 memcpy(sqe, msg, qp->qm->sqe_size); in hisi_qp_send()
2295 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0); in hisi_qp_send()
2296 atomic_inc(&qp->qp_status.used); in hisi_qp_send()
2297 qp_status->sq_tail = sq_tail_next; in hisi_qp_send()
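A caller-side sketch matching the contract stated at line 2266 above: -EBUSY when the SQ is full, -EAGAIN while the qp or qm is stopping or resetting. The message buffer name and the decision to simply propagate the error are illustrative:

	ret = hisi_qp_send(qp, sqe_msg);
	if (ret == -EBUSY)
		return ret;	/* SQ full: let completions drain, then retry */
	if (ret == -EAGAIN)
		return ret;	/* qp/qm stopping or resetting: resubmit later */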
2307 if (qm->ver == QM_HW_V1) in hisi_qm_cache_wb()
2310 writel(0x1, qm->io_base + QM_CACHE_WB_START); in hisi_qm_cache_wb()
2311 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, in hisi_qm_cache_wb()
2314 dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n"); in hisi_qm_cache_wb()
2319 wake_up_interruptible(&qp->uacce_q->wait); in qm_qp_event_notifier()
2325 struct hisi_qm *qm = uacce->priv; in hisi_qm_get_available_instances()
2328 down_read(&qm->qps_lock); in hisi_qm_get_available_instances()
2329 ret = qm->qp_num - qm->qp_in_used; in hisi_qm_get_available_instances()
2330 up_read(&qm->qps_lock); in hisi_qm_get_available_instances()
2339 for (i = 0; i < qm->qp_num; i++) in hisi_qm_set_hw_reset()
2340 qm_set_qp_disable(&qm->qp_array[i], offset); in hisi_qm_set_hw_reset()
2347 struct hisi_qm *qm = uacce->priv; in hisi_qm_uacce_get_queue()
2355 q->priv = qp; in hisi_qm_uacce_get_queue()
2356 q->uacce = uacce; in hisi_qm_uacce_get_queue()
2357 qp->uacce_q = q; in hisi_qm_uacce_get_queue()
2358 qp->event_cb = qm_qp_event_notifier; in hisi_qm_uacce_get_queue()
2359 qp->pasid = arg; in hisi_qm_uacce_get_queue()
2360 qp->is_in_kernel = false; in hisi_qm_uacce_get_queue()
2367 struct hisi_qp *qp = q->priv; in hisi_qm_uacce_put_queue()
2377 struct hisi_qp *qp = q->priv; in hisi_qm_uacce_mmap()
2378 struct hisi_qm *qm = qp->qm; in hisi_qm_uacce_mmap()
2379 resource_size_t phys_base = qm->db_phys_base + in hisi_qm_uacce_mmap()
2380 qp->qp_id * qm->db_interval; in hisi_qm_uacce_mmap()
2381 size_t sz = vma->vm_end - vma->vm_start; in hisi_qm_uacce_mmap()
2382 struct pci_dev *pdev = qm->pdev; in hisi_qm_uacce_mmap()
2383 struct device *dev = &pdev->dev; in hisi_qm_uacce_mmap()
2387 switch (qfr->type) { in hisi_qm_uacce_mmap()
2389 if (qm->ver == QM_HW_V1) { in hisi_qm_uacce_mmap()
2391 return -EINVAL; in hisi_qm_uacce_mmap()
2392 } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { in hisi_qm_uacce_mmap()
2395 return -EINVAL; in hisi_qm_uacce_mmap()
2397 if (sz > qm->db_interval) in hisi_qm_uacce_mmap()
2398 return -EINVAL; in hisi_qm_uacce_mmap()
2403 return remap_pfn_range(vma, vma->vm_start, in hisi_qm_uacce_mmap()
2405 sz, pgprot_noncached(vma->vm_page_prot)); in hisi_qm_uacce_mmap()
2407 if (sz != qp->qdma.size) in hisi_qm_uacce_mmap()
2408 return -EINVAL; in hisi_qm_uacce_mmap()
2414 vm_pgoff = vma->vm_pgoff; in hisi_qm_uacce_mmap()
2415 vma->vm_pgoff = 0; in hisi_qm_uacce_mmap()
2416 ret = dma_mmap_coherent(dev, vma, qp->qdma.va, in hisi_qm_uacce_mmap()
2417 qp->qdma.dma, sz); in hisi_qm_uacce_mmap()
2418 vma->vm_pgoff = vm_pgoff; in hisi_qm_uacce_mmap()
2422 return -EINVAL; in hisi_qm_uacce_mmap()
2428 struct hisi_qp *qp = q->priv; in hisi_qm_uacce_start_queue()
2430 return hisi_qm_start_qp(qp, qp->pasid); in hisi_qm_uacce_start_queue()
2435 hisi_qm_stop_qp(q->priv); in hisi_qm_uacce_stop_queue()
2440 struct hisi_qp *qp = q->priv; in hisi_qm_is_q_updated()
2441 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; in hisi_qm_is_q_updated()
2444 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { in hisi_qm_is_q_updated()
2448 cqe = qp->cqe + qp->qp_status.cq_head; in hisi_qm_is_q_updated()
2457 struct hisi_qm *qm = q->uacce->priv; in qm_set_sqctype()
2458 struct hisi_qp *qp = q->priv; in qm_set_sqctype()
2460 down_write(&qm->qps_lock); in qm_set_sqctype()
2461 qp->alg_type = type; in qm_set_sqctype()
2462 up_write(&qm->qps_lock); in qm_set_sqctype()
2468 struct hisi_qp *qp = q->priv; in hisi_qm_uacce_ioctl()
2475 return -EFAULT; in hisi_qm_uacce_ioctl()
2478 return -EINVAL; in hisi_qm_uacce_ioctl()
2481 qp_ctx.id = qp->qp_id; in hisi_qm_uacce_ioctl()
2485 return -EFAULT; in hisi_qm_uacce_ioctl()
2491 return -EFAULT; in hisi_qm_uacce_ioctl()
2493 qp_info.sqe_size = qp->qm->sqe_size; in hisi_qm_uacce_ioctl()
2494 qp_info.sq_depth = qp->sq_depth; in hisi_qm_uacce_ioctl()
2495 qp_info.cq_depth = qp->cq_depth; in hisi_qm_uacce_ioctl()
2499 return -EFAULT; in hisi_qm_uacce_ioctl()
2504 return -EINVAL; in hisi_qm_uacce_ioctl()
2508 * qm_hw_err_isolate() - Try to set the isolation status of the uacce device
2518 isolate = &qm->isolate_data; in qm_hw_err_isolate()
2523 if (qm->uacce->is_vf || isolate->is_isolate || !isolate->err_threshold) in qm_hw_err_isolate()
2528 return -ENOMEM; in qm_hw_err_isolate()
2531 * Time-stamp every slot AER error. Then check the AER error log when the in qm_hw_err_isolate()
2536 mutex_lock(&isolate->isolate_lock); in qm_hw_err_isolate()
2537 hw_err->timestamp = jiffies; in qm_hw_err_isolate()
2538 list_for_each_entry_safe(err, tmp, &isolate->qm_hw_errs, list) { in qm_hw_err_isolate()
2539 if ((hw_err->timestamp - err->timestamp) / HZ > in qm_hw_err_isolate()
2541 list_del(&err->list); in qm_hw_err_isolate()
2547 list_add(&hw_err->list, &isolate->qm_hw_errs); in qm_hw_err_isolate()
2548 mutex_unlock(&isolate->isolate_lock); in qm_hw_err_isolate()
2550 if (count >= isolate->err_threshold) in qm_hw_err_isolate()
2551 isolate->is_isolate = true; in qm_hw_err_isolate()
2560 mutex_lock(&qm->isolate_data.isolate_lock); in qm_hw_err_destroy()
2561 list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) { in qm_hw_err_destroy()
2562 list_del(&err->list); in qm_hw_err_destroy()
2565 mutex_unlock(&qm->isolate_data.isolate_lock); in qm_hw_err_destroy()
2570 struct hisi_qm *qm = uacce->priv; in hisi_qm_get_isolate_state()
2573 if (uacce->is_vf) in hisi_qm_get_isolate_state()
2574 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); in hisi_qm_get_isolate_state()
2578 return pf_qm->isolate_data.is_isolate ? in hisi_qm_get_isolate_state()
2584 struct hisi_qm *qm = uacce->priv; in hisi_qm_isolate_threshold_write()
2587 if (uacce->is_vf) in hisi_qm_isolate_threshold_write()
2588 return -EPERM; in hisi_qm_isolate_threshold_write()
2590 if (qm->isolate_data.is_isolate) in hisi_qm_isolate_threshold_write()
2591 return -EPERM; in hisi_qm_isolate_threshold_write()
2593 qm->isolate_data.err_threshold = num; in hisi_qm_isolate_threshold_write()
2603 struct hisi_qm *qm = uacce->priv; in hisi_qm_isolate_threshold_read()
2606 if (uacce->is_vf) { in hisi_qm_isolate_threshold_read()
2607 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); in hisi_qm_isolate_threshold_read()
2608 return pf_qm->isolate_data.err_threshold; in hisi_qm_isolate_threshold_read()
2611 return qm->isolate_data.err_threshold; in hisi_qm_isolate_threshold_read()
2630 struct uacce_device *uacce = qm->uacce; in qm_remove_uacce()
2632 if (qm->use_sva) { in qm_remove_uacce()
2635 qm->uacce = NULL; in qm_remove_uacce()
2641 struct pci_dev *pdev = qm->pdev; in qm_alloc_uacce()
2652 ret = strscpy(interface.name, dev_driver_string(&pdev->dev), in qm_alloc_uacce()
2655 return -ENAMETOOLONG; in qm_alloc_uacce()
2657 uacce = uacce_alloc(&pdev->dev, &interface); in qm_alloc_uacce()
2661 if (uacce->flags & UACCE_DEV_SVA) { in qm_alloc_uacce()
2662 qm->use_sva = true; in qm_alloc_uacce()
2666 return -EINVAL; in qm_alloc_uacce()
2669 uacce->is_vf = pdev->is_virtfn; in qm_alloc_uacce()
2670 uacce->priv = qm; in qm_alloc_uacce()
2672 if (qm->ver == QM_HW_V1) in qm_alloc_uacce()
2673 uacce->api_ver = HISI_QM_API_VER_BASE; in qm_alloc_uacce()
2674 else if (qm->ver == QM_HW_V2) in qm_alloc_uacce()
2675 uacce->api_ver = HISI_QM_API_VER2_BASE; in qm_alloc_uacce()
2677 uacce->api_ver = HISI_QM_API_VER3_BASE; in qm_alloc_uacce()
2679 if (qm->ver == QM_HW_V1) in qm_alloc_uacce()
2681 else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) in qm_alloc_uacce()
2685 mmio_page_nr = qm->db_interval / PAGE_SIZE; in qm_alloc_uacce()
2690 dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth + in qm_alloc_uacce()
2694 uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr; in qm_alloc_uacce()
2695 uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr; in qm_alloc_uacce()
2697 qm->uacce = uacce; in qm_alloc_uacce()
2698 INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs); in qm_alloc_uacce()
2699 mutex_init(&qm->isolate_data.isolate_lock); in qm_alloc_uacce()
2705 * qm_frozen() - Try to freeze the QM to cut off continuous queue requests. If
2713 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) in qm_frozen()
2716 down_write(&qm->qps_lock); in qm_frozen()
2718 if (!qm->qp_in_used) { in qm_frozen()
2719 qm->qp_in_used = qm->qp_num; in qm_frozen()
2720 up_write(&qm->qps_lock); in qm_frozen()
2721 set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl); in qm_frozen()
2725 up_write(&qm->qps_lock); in qm_frozen()
2727 return -EBUSY; in qm_frozen()
2738 return -EINVAL; in qm_try_frozen_vfs()
2741 mutex_lock(&qm_list->lock); in qm_try_frozen_vfs()
2742 list_for_each_entry(qm, &qm_list->list, list) { in qm_try_frozen_vfs()
2743 dev = qm->pdev; in qm_try_frozen_vfs()
2755 mutex_unlock(&qm_list->lock); in qm_try_frozen_vfs()
2761 * hisi_qm_wait_task_finish() - Wait until the task is finished
2769 ((qm->fun_type == QM_HW_PF) && in hisi_qm_wait_task_finish()
2770 qm_try_frozen_vfs(qm->pdev, qm_list))) { in hisi_qm_wait_task_finish()
2774 while (test_bit(QM_RST_SCHED, &qm->misc_ctl) || in hisi_qm_wait_task_finish()
2775 test_bit(QM_RESETTING, &qm->misc_ctl)) in hisi_qm_wait_task_finish()
2778 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in hisi_qm_wait_task_finish()
2779 flush_work(&qm->cmd_process); in hisi_qm_wait_task_finish()
2787 struct device *dev = &qm->pdev->dev; in hisi_qp_memory_uninit()
2791 for (i = num - 1; i >= 0; i--) { in hisi_qp_memory_uninit()
2792 qdma = &qm->qp_array[i].qdma; in hisi_qp_memory_uninit()
2793 dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); in hisi_qp_memory_uninit()
2794 kfree(qm->poll_data[i].qp_finish_id); in hisi_qp_memory_uninit()
2797 kfree(qm->poll_data); in hisi_qp_memory_uninit()
2798 kfree(qm->qp_array); in hisi_qp_memory_uninit()
2804 struct device *dev = &qm->pdev->dev; in hisi_qp_memory_init()
2805 size_t off = qm->sqe_size * sq_depth; in hisi_qp_memory_init()
2807 int ret = -ENOMEM; in hisi_qp_memory_init()
2809 qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16), in hisi_qp_memory_init()
2811 if (!qm->poll_data[id].qp_finish_id) in hisi_qp_memory_init()
2812 return -ENOMEM; in hisi_qp_memory_init()
2814 qp = &qm->qp_array[id]; in hisi_qp_memory_init()
2815 qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma, in hisi_qp_memory_init()
2817 if (!qp->qdma.va) in hisi_qp_memory_init()
2820 qp->sqe = qp->qdma.va; in hisi_qp_memory_init()
2821 qp->sqe_dma = qp->qdma.dma; in hisi_qp_memory_init()
2822 qp->cqe = qp->qdma.va + off; in hisi_qp_memory_init()
2823 qp->cqe_dma = qp->qdma.dma + off; in hisi_qp_memory_init()
2824 qp->qdma.size = dma_size; in hisi_qp_memory_init()
2825 qp->sq_depth = sq_depth; in hisi_qp_memory_init()
2826 qp->cq_depth = cq_depth; in hisi_qp_memory_init()
2827 qp->qm = qm; in hisi_qp_memory_init()
2828 qp->qp_id = id; in hisi_qp_memory_init()
2833 kfree(qm->poll_data[id].qp_finish_id); in hisi_qp_memory_init()
2839 struct pci_dev *pdev = qm->pdev; in hisi_qm_pre_init()
2841 if (qm->ver == QM_HW_V1) in hisi_qm_pre_init()
2842 qm->ops = &qm_hw_ops_v1; in hisi_qm_pre_init()
2843 else if (qm->ver == QM_HW_V2) in hisi_qm_pre_init()
2844 qm->ops = &qm_hw_ops_v2; in hisi_qm_pre_init()
2846 qm->ops = &qm_hw_ops_v3; in hisi_qm_pre_init()
2849 mutex_init(&qm->mailbox_lock); in hisi_qm_pre_init()
2850 init_rwsem(&qm->qps_lock); in hisi_qm_pre_init()
2851 qm->qp_in_used = 0; in hisi_qm_pre_init()
2852 if (test_bit(QM_SUPPORT_RPM, &qm->caps)) { in hisi_qm_pre_init()
2853 if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev))) in hisi_qm_pre_init()
2854 dev_info(&pdev->dev, "_PS0 and _PR0 are not defined"); in hisi_qm_pre_init()
2862 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in qm_cmd_uninit()
2865 val = readl(qm->io_base + QM_IFC_INT_MASK); in qm_cmd_uninit()
2867 writel(val, qm->io_base + QM_IFC_INT_MASK); in qm_cmd_uninit()
2874 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in qm_cmd_init()
2881 val = readl(qm->io_base + QM_IFC_INT_MASK); in qm_cmd_init()
2883 writel(val, qm->io_base + QM_IFC_INT_MASK); in qm_cmd_init()
2888 struct pci_dev *pdev = qm->pdev; in qm_put_pci_res()
2890 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) in qm_put_pci_res()
2891 iounmap(qm->db_io_base); in qm_put_pci_res()
2893 iounmap(qm->io_base); in qm_put_pci_res()
2899 struct pci_dev *pdev = qm->pdev; in hisi_qm_pci_uninit()
2908 if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF) in hisi_qm_set_state()
2909 writel(state, qm->io_base + QM_VF_STATE); in hisi_qm_set_state()
2914 destroy_workqueue(qm->wq); in hisi_qm_unint_work()
2919 struct device *dev = &qm->pdev->dev; in hisi_qm_memory_uninit()
2921 hisi_qp_memory_uninit(qm, qm->qp_num); in hisi_qm_memory_uninit()
2922 if (qm->qdma.va) { in hisi_qm_memory_uninit()
2924 dma_free_coherent(dev, qm->qdma.size, in hisi_qm_memory_uninit()
2925 qm->qdma.va, qm->qdma.dma); in hisi_qm_memory_uninit()
2928 idr_destroy(&qm->qp_idr); in hisi_qm_memory_uninit()
2930 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) in hisi_qm_memory_uninit()
2931 kfree(qm->factor); in hisi_qm_memory_uninit()
2935 * hisi_qm_uninit() - Uninitialize qm.
2944 down_write(&qm->qps_lock); in hisi_qm_uninit()
2947 up_write(&qm->qps_lock); in hisi_qm_uninit()
2953 up_write(&qm->qps_lock); in hisi_qm_uninit()
2957 if (qm->use_sva) { in hisi_qm_uninit()
2958 uacce_remove(qm->uacce); in hisi_qm_uninit()
2959 qm->uacce = NULL; in hisi_qm_uninit()
2965 * hisi_qm_get_vft() - Get vft from a qm.
2979 return -EINVAL; in hisi_qm_get_vft()
2981 if (!qm->ops->get_vft) { in hisi_qm_get_vft()
2982 dev_err(&qm->pdev->dev, "Don't support vft read!\n"); in hisi_qm_get_vft()
2983 return -EINVAL; in hisi_qm_get_vft()
2986 return qm->ops->get_vft(qm, base, number); in hisi_qm_get_vft()
2990 * hisi_qm_set_vft() - Set vft to a qm.
2999 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
3000 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
3006 u32 max_q_num = qm->ctrl_qp_num; in hisi_qm_set_vft()
3010 return -EINVAL; in hisi_qm_set_vft()
3017 struct hisi_qm_status *status = &qm->status; in qm_init_eq_aeq_status()
3019 status->eq_head = 0; in qm_init_eq_aeq_status()
3020 status->aeq_head = 0; in qm_init_eq_aeq_status()
3021 status->eqc_phase = true; in qm_init_eq_aeq_status()
3022 status->aeqc_phase = true; in qm_init_eq_aeq_status()
3028 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); in qm_enable_eq_aeq_interrupts()
3029 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_enable_eq_aeq_interrupts()
3031 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK); in qm_enable_eq_aeq_interrupts()
3032 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK); in qm_enable_eq_aeq_interrupts()
3037 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); in qm_disable_eq_aeq_interrupts()
3038 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); in qm_disable_eq_aeq_interrupts()
3043 struct device *dev = &qm->pdev->dev; in qm_eq_ctx_cfg()
3050 return -ENOMEM; in qm_eq_ctx_cfg()
3052 eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma)); in qm_eq_ctx_cfg()
3053 eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); in qm_eq_ctx_cfg()
3054 if (qm->ver == QM_HW_V1) in qm_eq_ctx_cfg()
3055 eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE); in qm_eq_ctx_cfg()
3056 eqc->dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); in qm_eq_ctx_cfg()
3062 return -ENOMEM; in qm_eq_ctx_cfg()
3074 struct device *dev = &qm->pdev->dev; in qm_aeq_ctx_cfg()
3081 return -ENOMEM; in qm_aeq_ctx_cfg()
3083 aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); in qm_aeq_ctx_cfg()
3084 aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); in qm_aeq_ctx_cfg()
3085 aeqc->dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); in qm_aeq_ctx_cfg()
3091 return -ENOMEM; in qm_aeq_ctx_cfg()
3103 struct device *dev = &qm->pdev->dev; in qm_eq_aeq_ctx_cfg()
3121 WARN_ON(!qm->qdma.va); in __hisi_qm_start()
3123 if (qm->fun_type == QM_HW_PF) { in __hisi_qm_start()
3124 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); in __hisi_qm_start()
3133 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); in __hisi_qm_start()
3137 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); in __hisi_qm_start()
3148 * hisi_qm_start() - start qm
3155 struct device *dev = &qm->pdev->dev; in hisi_qm_start()
3158 down_write(&qm->qps_lock); in hisi_qm_start()
3161 up_write(&qm->qps_lock); in hisi_qm_start()
3162 return -EPERM; in hisi_qm_start()
3165 dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num); in hisi_qm_start()
3167 if (!qm->qp_num) { in hisi_qm_start()
3169 ret = -EINVAL; in hisi_qm_start()
3175 atomic_set(&qm->status.flags, QM_START); in hisi_qm_start()
3179 up_write(&qm->qps_lock); in hisi_qm_start()
3186 struct device *dev = &qm->pdev->dev; in qm_restart()
3194 down_write(&qm->qps_lock); in qm_restart()
3195 for (i = 0; i < qm->qp_num; i++) { in qm_restart()
3196 qp = &qm->qp_array[i]; in qm_restart()
3197 if (atomic_read(&qp->qp_status.flags) == QP_STOP && in qm_restart()
3198 qp->is_resetting == true) { in qm_restart()
3203 up_write(&qm->qps_lock); in qm_restart()
3206 qp->is_resetting = false; in qm_restart()
3209 up_write(&qm->qps_lock); in qm_restart()
3217 struct device *dev = &qm->pdev->dev; in qm_stop_started_qp()
3221 for (i = 0; i < qm->qp_num; i++) { in qm_stop_started_qp()
3222 qp = &qm->qp_array[i]; in qm_stop_started_qp()
3223 if (qp && atomic_read(&qp->qp_status.flags) == QP_START) { in qm_stop_started_qp()
3224 qp->is_resetting = true; in qm_stop_started_qp()
3237 * qm_clear_queues() - Clear the memory of all queues in a qm.
3248 for (i = 0; i < qm->qp_num; i++) { in qm_clear_queues()
3249 qp = &qm->qp_array[i]; in qm_clear_queues()
3250 if (qp->is_in_kernel && qp->is_resetting) in qm_clear_queues()
3251 memset(qp->qdma.va, 0, qp->qdma.size); in qm_clear_queues()
3254 memset(qm->qdma.va, 0, qm->qdma.size); in qm_clear_queues()
3258 * hisi_qm_stop() - Stop a qm.
3268 struct device *dev = &qm->pdev->dev; in hisi_qm_stop()
3271 down_write(&qm->qps_lock); in hisi_qm_stop()
3273 qm->status.stop_reason = r; in hisi_qm_stop()
3275 ret = -EPERM; in hisi_qm_stop()
3279 if (qm->status.stop_reason == QM_SOFT_RESET || in hisi_qm_stop()
3280 qm->status.stop_reason == QM_DOWN) { in hisi_qm_stop()
3291 if (qm->fun_type == QM_HW_PF) { in hisi_qm_stop()
3295 ret = -EBUSY; in hisi_qm_stop()
3301 atomic_set(&qm->status.flags, QM_STOP); in hisi_qm_stop()
3304 up_write(&qm->qps_lock); in hisi_qm_stop()
3311 if (!qm->ops->hw_error_init) { in qm_hw_error_init()
3312 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n"); in qm_hw_error_init()
3316 qm->ops->hw_error_init(qm); in qm_hw_error_init()
3321 if (!qm->ops->hw_error_uninit) { in qm_hw_error_uninit()
3322 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n"); in qm_hw_error_uninit()
3326 qm->ops->hw_error_uninit(qm); in qm_hw_error_uninit()
3331 if (!qm->ops->hw_error_handle) { in qm_hw_error_handle()
3332 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n"); in qm_hw_error_handle()
3336 return qm->ops->hw_error_handle(qm); in qm_hw_error_handle()
3340 * hisi_qm_dev_err_init() - Initialize device error configuration.
3347 if (qm->fun_type == QM_HW_VF) in hisi_qm_dev_err_init()
3352 if (!qm->err_ini->hw_err_enable) { in hisi_qm_dev_err_init()
3353 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n"); in hisi_qm_dev_err_init()
3356 qm->err_ini->hw_err_enable(qm); in hisi_qm_dev_err_init()
3361 * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
3368 if (qm->fun_type == QM_HW_VF) in hisi_qm_dev_err_uninit()
3373 if (!qm->err_ini->hw_err_disable) { in hisi_qm_dev_err_uninit()
3374 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n"); in hisi_qm_dev_err_uninit()
3377 qm->err_ini->hw_err_disable(qm); in hisi_qm_dev_err_uninit()
3382 * hisi_qm_free_qps() - free multiple queue pairs.
3393 for (i = qp_num - 1; i >= 0; i--) in hisi_qm_free_qps()
3403 list_del(&res->list); in free_list()
3417 list_for_each_entry(qm, &qm_list->list, list) { in hisi_qm_sort_devices()
3418 dev = &qm->pdev->dev; in hisi_qm_sort_devices()
3426 return -ENOMEM; in hisi_qm_sort_devices()
3428 res->qm = qm; in hisi_qm_sort_devices()
3429 res->distance = node_distance(dev_node, node); in hisi_qm_sort_devices()
3432 if (res->distance < tmp->distance) { in hisi_qm_sort_devices()
3433 n = &tmp->list; in hisi_qm_sort_devices()
3437 list_add_tail(&res->list, n); in hisi_qm_sort_devices()
3444 * hisi_qm_alloc_qps_node() - Create multiple queue pairs.
3459 int ret = -ENODEV; in hisi_qm_alloc_qps_node()
3464 return -EINVAL; in hisi_qm_alloc_qps_node()
3466 mutex_lock(&qm_list->lock); in hisi_qm_alloc_qps_node()
3468 mutex_unlock(&qm_list->lock); in hisi_qm_alloc_qps_node()
3474 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type); in hisi_qm_alloc_qps_node()
3487 mutex_unlock(&qm_list->lock); in hisi_qm_alloc_qps_node()
3501 u32 max_qp_num = qm->max_qp_num; in qm_vf_q_assign()
3502 u32 q_base = qm->qp_num; in qm_vf_q_assign()
3506 return -EINVAL; in qm_vf_q_assign()
3508 vfs_q_num = qm->ctrl_qp_num - qm->qp_num; in qm_vf_q_assign()
3512 return -EINVAL; in qm_vf_q_assign()
3517 for (i = num_vfs; i > 0; i--) { in qm_vf_q_assign()
3527 remain_q_num--; in qm_vf_q_assign()
3535 for (j = num_vfs; j > i; j--) in qm_vf_q_assign()
3550 for (i = 1; i <= qm->vfs_num; i++) { in qm_clear_vft_config()
3555 qm->vfs_num = 0; in qm_clear_vft_config()
3562 struct device *dev = &qm->pdev->dev; in qm_func_shaper_enable()
3566 total_vfs = pci_sriov_get_totalvfs(qm->pdev); in qm_func_shaper_enable()
3568 return -EINVAL; in qm_func_shaper_enable()
3570 qm->factor[fun_index].func_qos = qos; in qm_func_shaper_enable()
3572 ret = qm_get_shaper_para(ir, &qm->factor[fun_index]); in qm_func_shaper_enable()
3575 return -EINVAL; in qm_func_shaper_enable()
3583 return -EINVAL; in qm_func_shaper_enable()
3598 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_get_shaper_vft_qos()
3604 writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR); in qm_get_shaper_vft_qos()
3605 writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE); in qm_get_shaper_vft_qos()
3606 writel(fun_index, qm->io_base + QM_VFT_CFG); in qm_get_shaper_vft_qos()
3608 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); in qm_get_shaper_vft_qos()
3609 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); in qm_get_shaper_vft_qos()
3611 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_get_shaper_vft_qos()
3617 shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) | in qm_get_shaper_vft_qos()
3618 ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32); in qm_get_shaper_vft_qos()
3629 ir = qm->factor[fun_index].func_qos * QM_QOS_RATE; in qm_get_shaper_vft_qos()
3631 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir; in qm_get_shaper_vft_qos()
3633 pci_err(qm->pdev, "error_rate: %u, get function qos is error!\n", error_rate); in qm_get_shaper_vft_qos()
3642 struct device *dev = &qm->pdev->dev; in qm_vf_get_qos()
3662 int ret = -EINVAL; in qm_vf_read_qos()
3665 qm->mb_qos = 0; in qm_vf_read_qos()
3670 pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n"); in qm_vf_read_qos()
3676 if (qm->mb_qos) in qm_vf_read_qos()
3680 pci_err(qm->pdev, "PF ping VF timeout!\n"); in qm_vf_read_qos()
3681 return -ETIMEDOUT; in qm_vf_read_qos()
3691 struct hisi_qm *qm = filp->private_data; in qm_algqos_read()
3701 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { in qm_algqos_read()
3702 pci_err(qm->pdev, "dev resetting, read alg qos failed!\n"); in qm_algqos_read()
3703 ret = -EAGAIN; in qm_algqos_read()
3707 if (qm->fun_type == QM_HW_PF) { in qm_algqos_read()
3713 ir = qm->mb_qos; in qm_algqos_read()
3722 clear_bit(QM_RESETTING, &qm->misc_ctl); in qm_algqos_read()
3732 const struct bus_type *bus_type = qm->pdev->dev.bus; in qm_get_qos_value()
3741 return -EINVAL; in qm_get_qos_value()
3745 pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n"); in qm_get_qos_value()
3746 return -EINVAL; in qm_get_qos_value()
3751 pci_err(qm->pdev, "input pci bdf number is error!\n"); in qm_get_qos_value()
3752 return -ENODEV; in qm_get_qos_value()
3757 *fun_index = pdev->devfn; in qm_get_qos_value()
3765 struct hisi_qm *qm = filp->private_data; in qm_algqos_write()
3775 return -ENOSPC; in qm_algqos_write()
3777 len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count); in qm_algqos_write()
3787 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { in qm_algqos_write()
3788 pci_err(qm->pdev, "dev resetting, write alg qos failed!\n"); in qm_algqos_write()
3789 return -EAGAIN; in qm_algqos_write()
3794 ret = -EINVAL; in qm_algqos_write()
3800 pci_err(qm->pdev, "failed to enable function shaper!\n"); in qm_algqos_write()
3801 ret = -EINVAL; in qm_algqos_write()
3805 pci_info(qm->pdev, "the qos value of function%u is set to %lu.\n", in qm_algqos_write()
3812 clear_bit(QM_RESETTING, &qm->misc_ctl); in qm_algqos_write()
3824 * hisi_qm_set_algqos_init() - Initialize function qos debugfs files.
3831 if (qm->fun_type == QM_HW_PF) in hisi_qm_set_algqos_init()
3832 debugfs_create_file("alg_qos", 0644, qm->debug.debug_root, in hisi_qm_set_algqos_init()
3834 else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in hisi_qm_set_algqos_init()
3835 debugfs_create_file("alg_qos", 0444, qm->debug.debug_root, in hisi_qm_set_algqos_init()
3844 qm->factor[i].func_qos = QM_QOS_MAX_VAL; in hisi_qm_init_vf_qos()
3848 * hisi_qm_sriov_enable() - enable virtual functions
3868 pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n", in hisi_qm_sriov_enable()
3875 ret = -ERANGE; in hisi_qm_sriov_enable()
3881 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) in hisi_qm_sriov_enable()
3890 qm->vfs_num = num_vfs; in hisi_qm_sriov_enable()
3910 * hisi_qm_sriov_disable - disable virtual functions
3923 return -EPERM; in hisi_qm_sriov_disable()
3927 if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) { in hisi_qm_sriov_disable()
3929 return -EBUSY; in hisi_qm_sriov_disable()
3945 * hisi_qm_sriov_configure - configure the number of VFs
3949 * Enable SR-IOV according to num_vfs, 0 means disable.
3964 if (!qm->err_ini->get_dev_hw_err_status) { in qm_dev_err_handle()
3965 dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n"); in qm_dev_err_handle()
3970 err_sts = qm->err_ini->get_dev_hw_err_status(qm); in qm_dev_err_handle()
3972 if (err_sts & qm->err_info.ecc_2bits_mask) in qm_dev_err_handle()
3973 qm->err_status.is_dev_ecc_mbit = true; in qm_dev_err_handle()
3975 if (qm->err_ini->log_dev_hw_err) in qm_dev_err_handle()
3976 qm->err_ini->log_dev_hw_err(qm, err_sts); in qm_dev_err_handle()
3978 if (err_sts & qm->err_info.dev_reset_mask) in qm_dev_err_handle()
3981 if (qm->err_ini->clear_dev_hw_err_status) in qm_dev_err_handle()
3982 qm->err_ini->clear_dev_hw_err_status(qm, err_sts); in qm_dev_err_handle()
4004 * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
4017 if (pdev->is_virtfn) in hisi_qm_dev_err_detected()
4034 struct pci_dev *pdev = qm->pdev; in qm_check_req_recv()
4038 if (qm->ver >= QM_HW_V3) in qm_check_req_recv()
4041 writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID); in qm_check_req_recv()
4042 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, in qm_check_req_recv()
4046 dev_err(&pdev->dev, "Fails to read QM reg!\n"); in qm_check_req_recv()
4050 writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID); in qm_check_req_recv()
4051 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, in qm_check_req_recv()
4055 dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n"); in qm_check_req_recv()
4062 struct pci_dev *pdev = qm->pdev; in qm_set_pf_mse()
4081 return -ETIMEDOUT; in qm_set_pf_mse()
4086 struct pci_dev *pdev = qm->pdev; in qm_set_vf_mse()
4108 return -ETIMEDOUT; in qm_set_vf_mse()
4114 struct hisi_qm_list *qm_list = qm->qm_list; in qm_vf_reset_prepare()
4115 struct pci_dev *pdev = qm->pdev; in qm_vf_reset_prepare()
4120 mutex_lock(&qm_list->lock); in qm_vf_reset_prepare()
4121 list_for_each_entry(vf_qm, &qm_list->list, list) { in qm_vf_reset_prepare()
4122 virtfn = vf_qm->pdev; in qm_vf_reset_prepare()
4137 mutex_unlock(&qm_list->lock); in qm_vf_reset_prepare()
4144 struct pci_dev *pdev = qm->pdev; in qm_try_stop_vfs()
4147 if (!qm->vfs_num) in qm_try_stop_vfs()
4151 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { in qm_try_stop_vfs()
4166 struct pci_dev *pdev = qm->pdev; in qm_controller_reset_prepare()
4190 if (qm->use_sva) { in qm_controller_reset_prepare()
4200 clear_bit(QM_RST_SCHED, &qm->misc_ctl); in qm_controller_reset_prepare()
4210 if (qm->ver >= QM_HW_V3) in qm_dev_ecc_mbit_handle()
4213 if (!qm->err_status.is_dev_ecc_mbit && in qm_dev_ecc_mbit_handle()
4214 qm->err_status.is_qm_ecc_mbit && in qm_dev_ecc_mbit_handle()
4215 qm->err_ini->close_axi_master_ooo) { in qm_dev_ecc_mbit_handle()
4216 qm->err_ini->close_axi_master_ooo(qm); in qm_dev_ecc_mbit_handle()
4217 } else if (qm->err_status.is_dev_ecc_mbit && in qm_dev_ecc_mbit_handle()
4218 !qm->err_status.is_qm_ecc_mbit && in qm_dev_ecc_mbit_handle()
4219 !qm->err_ini->close_axi_master_ooo) { in qm_dev_ecc_mbit_handle()
4220 nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); in qm_dev_ecc_mbit_handle()
4222 qm->io_base + QM_RAS_NFE_ENABLE); in qm_dev_ecc_mbit_handle()
4223 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); in qm_dev_ecc_mbit_handle()
4229 struct pci_dev *pdev = qm->pdev; in qm_soft_reset()
4238 if (qm->vfs_num) { in qm_soft_reset()
4246 ret = qm->ops->set_msi(qm, false); in qm_soft_reset()
4256 qm->io_base + ACC_MASTER_GLOBAL_CTRL); in qm_soft_reset()
4258 /* If bus lock, reset chip */ in qm_soft_reset()
4259 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, in qm_soft_reset()
4264 pci_emerg(pdev, "Bus lock! Please reset system.\n"); in qm_soft_reset()
4268 if (qm->err_ini->close_sva_prefetch) in qm_soft_reset()
4269 qm->err_ini->close_sva_prefetch(qm); in qm_soft_reset()
4277 /* The reset related sub-control registers are not in PCI BAR */ in qm_soft_reset()
4278 if (ACPI_HANDLE(&pdev->dev)) { in qm_soft_reset()
4282 s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), in qm_soft_reset()
4283 qm->err_info.acpi_rst, in qm_soft_reset()
4287 return -EIO; in qm_soft_reset()
4292 return -EIO; in qm_soft_reset()
4296 return -EINVAL; in qm_soft_reset()
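Because the reset sub-control registers sit outside the PCI BARs, the soft reset asks platform firmware to toggle them through an ACPI method whose name is taken from qm->err_info.acpi_rst. A hedged sketch of invoking such a method; "RSTC" is a made-up method name used only for illustration:

#include <linux/acpi.h>
#include <linux/device.h>

/* "RSTC" is a placeholder; the driver reads the real method name from
 * its error-info table. */
static int example_acpi_assisted_reset(struct device *dev)
{
	unsigned long long value = 0;
	acpi_status s;

	if (!ACPI_HANDLE(dev))
		return -EINVAL;

	s = acpi_evaluate_integer(ACPI_HANDLE(dev), "RSTC", NULL, &value);
	if (ACPI_FAILURE(s))
		return -EIO;		/* firmware call itself failed */
	if (value)
		return -EIO;		/* method reported a reset failure */

	return 0;
}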
4304 struct hisi_qm_list *qm_list = qm->qm_list; in qm_vf_reset_done()
4305 struct pci_dev *pdev = qm->pdev; in qm_vf_reset_done()
4310 mutex_lock(&qm_list->lock); in qm_vf_reset_done()
4311 list_for_each_entry(vf_qm, &qm_list->list, list) { in qm_vf_reset_done()
4312 virtfn = vf_qm->pdev; in qm_vf_reset_done()
4327 mutex_unlock(&qm_list->lock); in qm_vf_reset_done()
4333 struct pci_dev *pdev = qm->pdev; in qm_try_start_vfs()
4336 if (!qm->vfs_num) in qm_try_start_vfs()
4339 ret = qm_vf_q_assign(qm, qm->vfs_num); in qm_try_start_vfs()
4346 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { in qm_try_start_vfs()
4361 return qm->err_ini->hw_init(qm); in qm_dev_hw_init()
4368 if (qm->err_ini->open_sva_prefetch) in qm_restart_prepare()
4369 qm->err_ini->open_sva_prefetch(qm); in qm_restart_prepare()
4371 if (qm->ver >= QM_HW_V3) in qm_restart_prepare()
4374 if (!qm->err_status.is_qm_ecc_mbit && in qm_restart_prepare()
4375 !qm->err_status.is_dev_ecc_mbit) in qm_restart_prepare()
4379 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_prepare()
4380 writel(value & ~qm->err_info.msi_wr_port, in qm_restart_prepare()
4381 qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_prepare()
4384 value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask; in qm_restart_prepare()
4385 if (value && qm->err_ini->clear_dev_hw_err_status) in qm_restart_prepare()
4386 qm->err_ini->clear_dev_hw_err_status(qm, value); in qm_restart_prepare()
4389 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE); in qm_restart_prepare()
4392 writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS); in qm_restart_prepare()
4399 if (qm->ver >= QM_HW_V3) in qm_restart_done()
4402 if (!qm->err_status.is_qm_ecc_mbit && in qm_restart_done()
4403 !qm->err_status.is_dev_ecc_mbit) in qm_restart_done()
4407 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_done()
4408 value |= qm->err_info.msi_wr_port; in qm_restart_done()
4409 writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_done()
4412 qm->err_status.is_qm_ecc_mbit = false; in qm_restart_done()
4413 qm->err_status.is_dev_ecc_mbit = false; in qm_restart_done()
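qm_restart_prepare() masks the device's MSI write port out of ACC_AM_CFG_PORT_WR_EN while stale ECC state is cleared, and qm_restart_done() sets the same bits again once the restart finishes. A generic sketch of that clear/restore pair; the offset and mask arguments stand in for the driver's err_info fields:

#include <linux/io.h>

/* Offsets and the port mask stand in for the driver's err_info fields. */
static void example_block_wr_port(void __iomem *base, u32 off, u32 port_mask)
{
	writel(readl(base + off) & ~port_mask, base + off);
}

static void example_restore_wr_port(void __iomem *base, u32 off, u32 port_mask)
{
	writel(readl(base + off) | port_mask, base + off);
}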
4418 struct pci_dev *pdev = qm->pdev; in qm_controller_reset_done()
4421 ret = qm->ops->set_msi(qm, true); in qm_controller_reset_done()
4433 if (qm->vfs_num) { in qm_controller_reset_done()
4449 if (qm->err_ini->open_axi_master_ooo) in qm_controller_reset_done()
4450 qm->err_ini->open_axi_master_ooo(qm); in qm_controller_reset_done()
4482 struct pci_dev *pdev = qm->pdev; in qm_controller_reset()
4491 clear_bit(QM_RST_SCHED, &qm->misc_ctl); in qm_controller_reset()
4496 if (qm->err_ini->show_last_dfx_regs) in qm_controller_reset()
4497 qm->err_ini->show_last_dfx_regs(qm); in qm_controller_reset()
4516 if (qm->use_sva) in qm_controller_reset()
4517 qm->isolate_data.is_isolate = true; in qm_controller_reset()
4522 * hisi_qm_dev_slot_reset() - slot reset
4533 if (pdev->is_virtfn) in hisi_qm_dev_slot_reset()
4573 if (qm->fun_type == QM_HW_PF) in hisi_qm_reset_prepare()
4602 pci_read_config_dword(qm->pdev, PCI_COMMAND, &id); in qm_flr_reset_complete()
4617 if (qm->fun_type == QM_HW_PF) { in hisi_qm_reset_done()
4642 if (qm->fun_type == QM_HW_PF) in hisi_qm_reset_done()
4657 atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt); in qm_abnormal_irq()
4660 !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) && in qm_abnormal_irq()
4661 !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl)) in qm_abnormal_irq()
4662 schedule_work(&qm->rst_work); in qm_abnormal_irq()
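The abnormal interrupt only schedules the controller-reset work once: QM_RST_SCHED acts as a one-shot latch taken with test_and_set_bit() and released again by the reset path. A condensed sketch of that guard (the helper and its parameters are hypothetical):

#include <linux/bitops.h>
#include <linux/workqueue.h>

/* Hypothetical condensed form of the guard: queue the reset work only if
 * the driver is not being removed and no reset is already scheduled. */
static void example_maybe_schedule_reset(unsigned long *ctl_flags,
					 struct work_struct *rst_work,
					 int removing_bit, int sched_bit)
{
	if (!test_bit(removing_bit, ctl_flags) &&
	    !test_and_set_bit(sched_bit, ctl_flags))
		schedule_work(rst_work);
}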
4668 * hisi_qm_dev_shutdown() - Shutdown device.
4680 dev_err(&pdev->dev, "Failed to stop qm in shutdown!\n"); in hisi_qm_dev_shutdown()
4693 clear_bit(QM_RST_SCHED, &qm->misc_ctl); in hisi_qm_controller_reset()
4700 dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret); in hisi_qm_controller_reset()
4709 struct pci_dev *pdev = qm->pdev; in qm_pf_reset_vf_prepare()
4714 dev_err(&pdev->dev, "reset prepare not ready!\n"); in qm_pf_reset_vf_prepare()
4715 atomic_set(&qm->status.flags, QM_STOP); in qm_pf_reset_vf_prepare()
4722 dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret); in qm_pf_reset_vf_prepare()
4723 atomic_set(&qm->status.flags, QM_STOP); in qm_pf_reset_vf_prepare()
4737 dev_warn(&pdev->dev, "PF response timed out in reset prepare!\n"); in qm_pf_reset_vf_prepare()
4743 struct pci_dev *pdev = qm->pdev; in qm_pf_reset_vf_done()
4749 dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret); in qm_pf_reset_vf_done()
4756 dev_warn(&pdev->dev, "PF response timed out in reset done!\n"); in qm_pf_reset_vf_done()
4763 struct device *dev = &qm->pdev->dev; in qm_wait_pf_reset_finish()
4769 ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val, in qm_wait_pf_reset_finish()
4775 return -ETIMEDOUT; in qm_wait_pf_reset_finish()
4792 ret = -EINVAL; in qm_wait_pf_reset_finish()
4801 struct device *dev = &qm->pdev->dev; in qm_pf_reset_vf_process()
4827 struct device *dev = &qm->pdev->dev; in qm_handle_cmd_msg()
4855 qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT; in qm_handle_cmd_msg()
4867 u32 vfs_num = qm->vfs_num; in qm_cmd_process()
4871 if (qm->fun_type == QM_HW_PF) { in qm_cmd_process()
4872 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P); in qm_cmd_process()
4888 * hisi_qm_alg_register() - Register alg to crypto and add qm to qm_list.
4897 struct device *dev = &qm->pdev->dev; in hisi_qm_alg_register()
4901 mutex_lock(&qm_list->lock); in hisi_qm_alg_register()
4902 if (list_empty(&qm_list->list)) in hisi_qm_alg_register()
4904 list_add_tail(&qm->list, &qm_list->list); in hisi_qm_alg_register()
4905 mutex_unlock(&qm_list->lock); in hisi_qm_alg_register()
4907 if (qm->ver <= QM_HW_V2 && qm->use_sva) { in hisi_qm_alg_register()
4913 ret = qm_list->register_to_crypto(qm); in hisi_qm_alg_register()
4915 mutex_lock(&qm_list->lock); in hisi_qm_alg_register()
4916 list_del(&qm->list); in hisi_qm_alg_register()
4917 mutex_unlock(&qm_list->lock); in hisi_qm_alg_register()
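The register path adds the qm to the shared list under the lock, lets only the first device on the list register the crypto algorithms, and rolls the list change back if that registration fails. A simplified sketch of the add-then-rollback pattern, with stand-in types for the driver's qm_list:

#include <linux/list.h>
#include <linux/mutex.h>

/* Stand-in types for the driver's qm/qm_list; sketch only. */
struct example_alg_list {
	struct list_head list;
	struct mutex lock;
	int (*register_algs)(void);
};

static int example_alg_register(struct list_head *node,
				struct example_alg_list *al)
{
	bool first;
	int ret = 0;

	mutex_lock(&al->lock);
	first = list_empty(&al->list);	/* only the first device registers */
	list_add_tail(node, &al->list);
	mutex_unlock(&al->lock);

	if (first) {
		ret = al->register_algs();
		if (ret) {
			mutex_lock(&al->lock);
			list_del(node);	/* roll back on failure */
			mutex_unlock(&al->lock);
		}
	}

	return ret;
}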
4926 * hisi_qm_alg_unregister() - Unregister alg from crypto and delete qm from
4936 mutex_lock(&qm_list->lock); in hisi_qm_alg_unregister()
4937 list_del(&qm->list); in hisi_qm_alg_unregister()
4938 mutex_unlock(&qm_list->lock); in hisi_qm_alg_unregister()
4940 if (qm->ver <= QM_HW_V2 && qm->use_sva) in hisi_qm_alg_unregister()
4943 if (list_empty(&qm_list->list)) in hisi_qm_alg_unregister()
4944 qm_list->unregister_from_crypto(qm); in hisi_qm_alg_unregister()
4950 struct pci_dev *pdev = qm->pdev; in qm_unregister_abnormal_irq()
4953 if (qm->fun_type == QM_HW_VF) in qm_unregister_abnormal_irq()
4956 val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val; in qm_unregister_abnormal_irq()
4966 struct pci_dev *pdev = qm->pdev; in qm_register_abnormal_irq()
4970 if (qm->fun_type == QM_HW_VF) in qm_register_abnormal_irq()
4973 val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val; in qm_register_abnormal_irq()
4978 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm); in qm_register_abnormal_irq()
4980 dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret); in qm_register_abnormal_irq()
4987 struct pci_dev *pdev = qm->pdev; in qm_unregister_mb_cmd_irq()
4990 val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val; in qm_unregister_mb_cmd_irq()
5000 struct pci_dev *pdev = qm->pdev; in qm_register_mb_cmd_irq()
5004 val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val; in qm_register_mb_cmd_irq()
5009 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm); in qm_register_mb_cmd_irq()
5011 dev_err(&pdev->dev, "failed to request function communication irq, ret = %d", ret); in qm_register_mb_cmd_irq()
5018 struct pci_dev *pdev = qm->pdev; in qm_unregister_aeq_irq()
5021 val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val; in qm_unregister_aeq_irq()
5031 struct pci_dev *pdev = qm->pdev; in qm_register_aeq_irq()
5035 val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val; in qm_register_aeq_irq()
5041 qm_aeq_thread, IRQF_ONESHOT, qm->dev_name, qm); in qm_register_aeq_irq()
5043 dev_err(&pdev->dev, "failed to request aeq irq, ret = %d", ret); in qm_register_aeq_irq()
5050 struct pci_dev *pdev = qm->pdev; in qm_unregister_eq_irq()
5053 val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val; in qm_unregister_eq_irq()
5063 struct pci_dev *pdev = qm->pdev; in qm_register_eq_irq()
5067 val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val; in qm_register_eq_irq()
5072 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_eq_irq, 0, qm->dev_name, qm); in qm_register_eq_irq()
5074 dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret); in qm_register_eq_irq()
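Every per-queue interrupt is requested against the MSI-X vector index decoded from the capability table, translated to a Linux IRQ number with pci_irq_vector(). A minimal request/free sketch; the handler and vector index are placeholders:

#include <linux/interrupt.h>
#include <linux/pci.h>

/* The handler and vector index are placeholders for illustration;
 * error checking of pci_irq_vector() is omitted. */
static irqreturn_t example_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_register_vector(struct pci_dev *pdev, unsigned int vec,
				   const char *name, void *drvdata)
{
	return request_irq(pci_irq_vector(pdev, vec), example_irq_handler, 0,
			   name, drvdata);
}

static void example_unregister_vector(struct pci_dev *pdev, unsigned int vec,
				      void *drvdata)
{
	free_irq(pci_irq_vector(pdev, vec), drvdata);
}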
5120 struct device *dev = &qm->pdev->dev; in qm_get_qp_num()
5124 if (qm->fun_type == QM_HW_VF) { in qm_get_qp_num()
5125 if (qm->ver != QM_HW_V1) in qm_get_qp_num()
5127 return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); in qm_get_qp_num()
5132 is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); in qm_get_qp_num()
5133 qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true); in qm_get_qp_num()
5134 qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, in qm_get_qp_num()
5137 if (qm->qp_num <= qm->max_qp_num) in qm_get_qp_num()
5140 if (test_bit(QM_MODULE_PARAM, &qm->misc_ctl)) { in qm_get_qp_num()
5143 qm->qp_num, qm->max_qp_num); in qm_get_qp_num()
5144 return -EINVAL; in qm_get_qp_num()
5148 qm->qp_num, qm->max_qp_num); in qm_get_qp_num()
5149 qm->qp_num = qm->max_qp_num; in qm_get_qp_num()
5150 qm->debug.curr_qm_qp_num = qm->qp_num; in qm_get_qp_num()
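If the requested queue-pair count exceeds what the hardware function advertises, the driver rejects it when the value came from a module parameter and otherwise clamps it with a warning. A condensed, hypothetical helper capturing that decision:

#include <linux/device.h>

/* Hypothetical helper condensing the clamp-or-reject decision. */
static int example_check_qp_num(struct device *dev, u32 *qp_num, u32 max,
				bool from_module_param)
{
	if (*qp_num <= max)
		return 0;

	if (from_module_param) {
		dev_err(dev, "qp_num %u exceeds the hardware limit %u\n",
			*qp_num, max);
		return -EINVAL;
	}

	dev_warn(dev, "clamping qp_num %u to the hardware limit %u\n",
		 *qp_num, max);
	*qp_num = max;

	return 0;
}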
5158 struct pci_dev *pdev = qm->pdev; in qm_pre_store_irq_type_caps()
5162 qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL); in qm_pre_store_irq_type_caps()
5164 return -ENOMEM; in qm_pre_store_irq_type_caps()
5169 qm_pre_store_caps[i], qm->cap_ver); in qm_pre_store_irq_type_caps()
5172 qm->cap_tables.qm_cap_table = qm_cap; in qm_pre_store_irq_type_caps()
5179 const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ? in qm_get_hw_caps()
5181 u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) : in qm_get_hw_caps()
5188 set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); in qm_get_hw_caps()
5190 if (qm->ver >= QM_HW_V3) { in qm_get_hw_caps()
5191 val = readl(qm->io_base + QM_FUNC_CAPS_REG); in qm_get_hw_caps()
5192 qm->cap_ver = val & QM_CAPBILITY_VERSION; in qm_get_hw_caps()
5197 val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver); in qm_get_hw_caps()
5199 set_bit(qm_cap_info_comm[i].type, &qm->caps); in qm_get_hw_caps()
5204 val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver); in qm_get_hw_caps()
5206 set_bit(cap_info[i].type, &qm->caps); in qm_get_hw_caps()
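Each capability-table entry maps a hardware field to a feature flag: when the decoded value is non-zero, the corresponding bit is set in qm->caps and later consumers check it with test_bit(). A rough sketch of decoding such a table into a flags word; the field layout here is assumed, not the driver's real hisi_qm_cap_info:

#include <linux/bitops.h>

/* Illustrative capability table; the field layout is assumed. */
struct example_cap_info {
	unsigned int type;	/* feature bit index in the caps word */
	u32 shift;
	u32 mask;
};

static void example_decode_caps(u32 cap_reg,
				const struct example_cap_info *tbl,
				unsigned int n, unsigned long *caps)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if ((cap_reg >> tbl[i].shift) & tbl[i].mask)
			set_bit(tbl[i].type, caps);
}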
5215 struct pci_dev *pdev = qm->pdev; in qm_get_pci_res()
5216 struct device *dev = &pdev->dev; in qm_get_pci_res()
5219 ret = pci_request_mem_regions(pdev, qm->dev_name); in qm_get_pci_res()
5225 qm->phys_base = pci_resource_start(pdev, PCI_BAR_2); in qm_get_pci_res()
5226 qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2)); in qm_get_pci_res()
5227 if (!qm->io_base) { in qm_get_pci_res()
5228 ret = -EIO; in qm_get_pci_res()
5236 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { in qm_get_pci_res()
5237 qm->db_interval = QM_QP_DB_INTERVAL; in qm_get_pci_res()
5238 qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4); in qm_get_pci_res()
5239 qm->db_io_base = ioremap(qm->db_phys_base, in qm_get_pci_res()
5241 if (!qm->db_io_base) { in qm_get_pci_res()
5242 ret = -EIO; in qm_get_pci_res()
5246 qm->db_phys_base = qm->phys_base; in qm_get_pci_res()
5247 qm->db_io_base = qm->io_base; in qm_get_pci_res()
5248 qm->db_interval = 0; in qm_get_pci_res()
5258 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) in qm_get_pci_res()
5259 iounmap(qm->db_io_base); in qm_get_pci_res()
5261 iounmap(qm->io_base); in qm_get_pci_res()
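BAR2 carries the QM register space and, when doorbell isolation is supported, BAR4 carries a separate doorbell window; both go through the usual claim-and-ioremap pattern with unwind on failure. A minimal sketch of that pattern (the helper itself is illustrative only):

#include <linux/io.h>
#include <linux/pci.h>

/* Illustrative only: claim the device's regions and map one BAR. */
static void __iomem *example_map_bar(struct pci_dev *pdev, int bar,
				     const char *name)
{
	void __iomem *base;

	if (pci_request_mem_regions(pdev, name))
		return NULL;

	base = ioremap(pci_resource_start(pdev, bar),
		       pci_resource_len(pdev, bar));
	if (!base)
		pci_release_mem_regions(pdev);	/* unwind on failure */

	return base;
}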
5269 struct pci_dev *pdev = qm->pdev; in hisi_qm_pci_init()
5270 struct device *dev = &pdev->dev; in hisi_qm_pci_init()
5309 for (i = 0; i < qm->qp_num; i++) in hisi_qm_init_work()
5310 INIT_WORK(&qm->poll_data[i].work, qm_work_process); in hisi_qm_init_work()
5312 if (qm->fun_type == QM_HW_PF) in hisi_qm_init_work()
5313 INIT_WORK(&qm->rst_work, hisi_qm_controller_reset); in hisi_qm_init_work()
5315 if (qm->ver > QM_HW_V2) in hisi_qm_init_work()
5316 INIT_WORK(&qm->cmd_process, qm_cmd_process); in hisi_qm_init_work()
5318 qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | in hisi_qm_init_work()
5320 pci_name(qm->pdev)); in hisi_qm_init_work()
5321 if (!qm->wq) { in hisi_qm_init_work()
5322 pci_err(qm->pdev, "failed to alloc workqueue!\n"); in hisi_qm_init_work()
5323 return -ENOMEM; in hisi_qm_init_work()
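A single high-priority, reclaim-safe workqueue backs the completion-poll works, the reset work and the mailbox-command work. A hedged sketch of the allocation plus one work item; the context struct and names are invented for illustration:

#include <linux/workqueue.h>

/* Hedged sketch: one per-device workqueue plus a single work item. */
struct example_ctx {
	struct workqueue_struct *wq;
	struct work_struct poll_work;
};

static void example_poll_fn(struct work_struct *work)
{
	/* drain completions here */
}

static int example_init_work(struct example_ctx *ctx, const char *devname)
{
	INIT_WORK(&ctx->poll_work, example_poll_fn);

	ctx->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM, 0,
				  devname);
	return ctx->wq ? 0 : -ENOMEM;
}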
5331 struct device *dev = &qm->pdev->dev; in hisi_qp_alloc_memory()
5336 qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL); in hisi_qp_alloc_memory()
5337 if (!qm->qp_array) in hisi_qp_alloc_memory()
5338 return -ENOMEM; in hisi_qp_alloc_memory()
5340 qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL); in hisi_qp_alloc_memory()
5341 if (!qm->poll_data) { in hisi_qp_alloc_memory()
5342 kfree(qm->qp_array); in hisi_qp_alloc_memory()
5343 return -ENOMEM; in hisi_qp_alloc_memory()
5349 qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth; in hisi_qp_alloc_memory()
5351 for (i = 0; i < qm->qp_num; i++) { in hisi_qp_alloc_memory()
5352 qm->poll_data[i].qm = qm; in hisi_qp_alloc_memory()
5369 struct device *dev = &qm->pdev->dev; in hisi_qm_memory_init()
5373 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { in hisi_qm_memory_init()
5374 total_func = pci_sriov_get_totalvfs(qm->pdev) + 1; in hisi_qm_memory_init()
5375 qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL); in hisi_qm_memory_init()
5376 if (!qm->factor) in hisi_qm_memory_init()
5377 return -ENOMEM; in hisi_qm_memory_init()
5380 qm->factor[0].func_qos = QM_QOS_MAX_VAL; in hisi_qm_memory_init()
5384 (qm)->type = ((qm)->qdma.va + (off)); \ in hisi_qm_memory_init()
5385 (qm)->type##_dma = (qm)->qdma.dma + (off); \ in hisi_qm_memory_init()
5389 idr_init(&qm->qp_idr); in hisi_qm_memory_init()
5390 qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP); in hisi_qm_memory_init()
5391 qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) + in hisi_qm_memory_init()
5392 QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) + in hisi_qm_memory_init()
5393 QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + in hisi_qm_memory_init()
5394 QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); in hisi_qm_memory_init()
5395 qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma, in hisi_qm_memory_init()
5397 dev_dbg(dev, "allocate qm dma buf size=%zx\n", qm->qdma.size); in hisi_qm_memory_init()
5398 if (!qm->qdma.va) { in hisi_qm_memory_init()
5399 ret = -ENOMEM; in hisi_qm_memory_init()
5403 QM_INIT_BUF(qm, eqe, qm->eq_depth); in hisi_qm_memory_init()
5404 QM_INIT_BUF(qm, aeqe, qm->aeq_depth); in hisi_qm_memory_init()
5405 QM_INIT_BUF(qm, sqc, qm->qp_num); in hisi_qm_memory_init()
5406 QM_INIT_BUF(qm, cqc, qm->qp_num); in hisi_qm_memory_init()
5415 dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); in hisi_qm_memory_init()
5417 idr_destroy(&qm->qp_idr); in hisi_qm_memory_init()
5418 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) in hisi_qm_memory_init()
5419 kfree(qm->factor); in hisi_qm_memory_init()
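All queue-context tables (EQE, AEQE, SQC and CQC rings) come out of one coherent DMA allocation that QM_INIT_BUF() then carves into aligned sub-regions. A rough sketch of that allocate-and-carve approach under assumed, simplified types:

#include <linux/dma-mapping.h>

/* Assumed, simplified types: one coherent buffer carved into sub-regions. */
struct example_qdma {
	void *va;
	dma_addr_t dma;
	size_t size;
};

static int example_alloc_qdma(struct device *dev, struct example_qdma *q,
			      size_t total)
{
	q->size = total;
	q->va = dma_alloc_coherent(dev, total, &q->dma, GFP_KERNEL);
	return q->va ? 0 : -ENOMEM;
}

static void *example_carve(struct example_qdma *q, size_t off, size_t len,
			   dma_addr_t *dma_out)
{
	if (off + len > q->size)
		return NULL;	/* region would overrun the allocation */

	*dma_out = q->dma + off;
	return q->va + off;
}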
5425 * hisi_qm_init() - Initialize the qm configuration.
5432 struct pci_dev *pdev = qm->pdev; in hisi_qm_init()
5433 struct device *dev = &pdev->dev; in hisi_qm_init()
5446 if (qm->fun_type == QM_HW_PF) { in hisi_qm_init()
5448 writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG); in hisi_qm_init()
5457 if (qm->mode == UACCE_MODE_SVA) { in hisi_qm_init()
5472 atomic_set(&qm->status.flags, QM_INIT); in hisi_qm_init()
5489 * hisi_qm_get_dfx_access() - Try to get dfx access.
5492 * Try to get dfx access, so that the user can read the debug information.
5499 struct device *dev = &qm->pdev->dev; in hisi_qm_get_dfx_access()
5502 dev_info(dev, "cannot read/write - device is suspended.\n"); in hisi_qm_get_dfx_access()
5503 return -EAGAIN; in hisi_qm_get_dfx_access()
5511 * hisi_qm_put_dfx_access() - Put dfx access.
5514 * Put dfx access, drop runtime PM usage counter.
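dfx (debug) register access must first resume a runtime-suspended device and drop the usage count again when done; hisi_qm_get_dfx_access()/hisi_qm_put_dfx_access() wrap exactly that pairing. A minimal runtime-PM sketch of the same idea:

#include <linux/pm_runtime.h>

/* Sketch only: resume the device before touching MMIO for debug reads,
 * then drop the usage count so it can autosuspend again. */
static int example_get_dfx_access(struct device *dev)
{
	return pm_runtime_resume_and_get(dev);
}

static void example_put_dfx_access(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}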
5523 * hisi_qm_pm_init() - Initialize qm runtime PM.
5530 struct device *dev = &qm->pdev->dev; in hisi_qm_pm_init()
5532 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) in hisi_qm_pm_init()
5542 * hisi_qm_pm_uninit() - Uninitialize qm runtime PM.
5549 struct device *dev = &qm->pdev->dev; in hisi_qm_pm_uninit()
5551 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) in hisi_qm_pm_uninit()
5561 struct pci_dev *pdev = qm->pdev; in qm_prepare_for_suspend()
5565 ret = qm->ops->set_msi(qm, false); in qm_prepare_for_suspend()
5573 qm->io_base + ACC_MASTER_GLOBAL_CTRL); in qm_prepare_for_suspend()
5575 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, in qm_prepare_for_suspend()
5580 pci_emerg(pdev, "Bus lock! Please reset system.\n"); in qm_prepare_for_suspend()
5593 struct pci_dev *pdev = qm->pdev; in qm_rebuild_for_resume()
5602 ret = qm->ops->set_msi(qm, true); in qm_rebuild_for_resume()
5617 writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG); in qm_rebuild_for_resume()
5627 * hisi_qm_suspend() - Runtime suspend of given device.
5655 * hisi_qm_resume() - Runtime resume of given device.