Lines Matching full:qm

370 	struct hisi_qm *qm;  member
386 int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
387 void (*qm_db)(struct hisi_qm *qm, u16 qn,
389 int (*debug_init)(struct hisi_qm *qm);
390 void (*hw_error_init)(struct hisi_qm *qm);
391 void (*hw_error_uninit)(struct hisi_qm *qm);
392 enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
393 int (*set_msi)(struct hisi_qm *qm, bool set);
457 static void qm_irqs_unregister(struct hisi_qm *qm);
459 static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new) in qm_avail_state() argument
461 enum qm_state curr = atomic_read(&qm->status.flags); in qm_avail_state()
481 dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n", in qm_avail_state()
485 dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n", in qm_avail_state()
491 static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp, in qm_qp_avail_state() argument
494 enum qm_state qm_curr = atomic_read(&qm->status.flags); in qm_qp_avail_state()
527 dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n", in qm_qp_avail_state()
531 dev_warn(&qm->pdev->dev, in qm_qp_avail_state()
532 "Can not change qp state from %s to %s in QM %s\n", in qm_qp_avail_state()
538 static u32 qm_get_hw_error_status(struct hisi_qm *qm) in qm_get_hw_error_status() argument
540 return readl(qm->io_base + QM_ABNORMAL_INT_STATUS); in qm_get_hw_error_status()
543 static u32 qm_get_dev_err_status(struct hisi_qm *qm) in qm_get_dev_err_status() argument
545 return qm->err_ini->get_dev_hw_err_status(qm); in qm_get_dev_err_status()
549 static bool qm_check_dev_error(struct hisi_qm *qm) in qm_check_dev_error() argument
553 if (qm->fun_type == QM_HW_VF) in qm_check_dev_error()
556 val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask; in qm_check_dev_error()
557 dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask; in qm_check_dev_error()
562 static int qm_wait_reset_finish(struct hisi_qm *qm) in qm_wait_reset_finish() argument
567 while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { in qm_wait_reset_finish()
576 static int qm_reset_prepare_ready(struct hisi_qm *qm) in qm_reset_prepare_ready() argument
578 struct pci_dev *pdev = qm->pdev; in qm_reset_prepare_ready()
585 if (qm->ver < QM_HW_V3) in qm_reset_prepare_ready()
588 return qm_wait_reset_finish(qm); in qm_reset_prepare_ready()
591 static void qm_reset_bit_clear(struct hisi_qm *qm) in qm_reset_bit_clear() argument
593 struct pci_dev *pdev = qm->pdev; in qm_reset_bit_clear()
596 if (qm->ver < QM_HW_V3) in qm_reset_bit_clear()
599 clear_bit(QM_RESETTING, &qm->misc_ctl); in qm_reset_bit_clear()
615 int hisi_qm_wait_mb_ready(struct hisi_qm *qm) in hisi_qm_wait_mb_ready() argument
619 return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE, in hisi_qm_wait_mb_ready()
626 static void qm_mb_write(struct hisi_qm *qm, const void *src) in qm_mb_write() argument
628 void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE; in qm_mb_write()
652 static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox) in qm_mb_nolock() argument
657 if (unlikely(hisi_qm_wait_mb_ready(qm))) { in qm_mb_nolock()
658 dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n"); in qm_mb_nolock()
663 qm_mb_write(qm, mailbox); in qm_mb_nolock()
665 if (unlikely(hisi_qm_wait_mb_ready(qm))) { in qm_mb_nolock()
666 dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n"); in qm_mb_nolock()
671 val = readl(qm->io_base + QM_MB_CMD_SEND_BASE); in qm_mb_nolock()
673 dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n"); in qm_mb_nolock()
681 atomic64_inc(&qm->debug.dfx.mb_err_cnt); in qm_mb_nolock()
685 int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, in hisi_qm_mb() argument
691 dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n", in hisi_qm_mb()
696 mutex_lock(&qm->mailbox_lock); in hisi_qm_mb()
697 ret = qm_mb_nolock(qm, &mailbox); in hisi_qm_mb()
698 mutex_unlock(&qm->mailbox_lock); in hisi_qm_mb()
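
The matches above cover the whole mailbox path: hisi_qm_mb() packs a qm_mailbox, takes mailbox_lock, and qm_mb_nolock() polls the ready bit both before and after writing the command. A minimal sketch of a caller, mirroring qm_dump_sqc_raw() further down in this listing (the DMA buffer setup is assumed, not shown in the matches):

/* Sketch: read back the SQC of queue qp_id through the mailbox.
 * ctx_dma is assumed to come from dma_alloc_coherent(); the last
 * argument selects a read (1) rather than a write (0), matching the
 * dump vs. configure calls elsewhere in this file.
 */
static int example_read_sqc(struct hisi_qm *qm, dma_addr_t ctx_dma, u16 qp_id)
{
	return hisi_qm_mb(qm, QM_MB_CMD_SQC, ctx_dma, qp_id, 1);
}
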
704 static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) in qm_db_v1() argument
712 writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1); in qm_db_v1()
715 static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) in qm_db_v2() argument
717 void __iomem *io_base = qm->io_base; in qm_db_v2()
722 io_base = qm->db_io_base + (u64)qn * qm->db_interval + in qm_db_v2()
735 static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) in qm_db() argument
737 dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n", in qm_db()
740 qm->ops->qm_db(qm, qn, cmd, index, priority); in qm_db()
743 static void qm_disable_clock_gate(struct hisi_qm *qm) in qm_disable_clock_gate() argument
747 /* if qm enables clock gating in Kunpeng930, qos will be inaccurate. */ in qm_disable_clock_gate()
748 if (qm->ver < QM_HW_V3) in qm_disable_clock_gate()
751 val = readl(qm->io_base + QM_PM_CTRL); in qm_disable_clock_gate()
753 writel(val, qm->io_base + QM_PM_CTRL); in qm_disable_clock_gate()
756 static int qm_dev_mem_reset(struct hisi_qm *qm) in qm_dev_mem_reset() argument
760 writel(0x1, qm->io_base + QM_MEM_START_INIT); in qm_dev_mem_reset()
761 return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val, in qm_dev_mem_reset()
768 	 * @qm: The qm from which to get information.
775 u32 hisi_qm_get_hw_info(struct hisi_qm *qm, in hisi_qm_get_hw_info() argument
781 switch (qm->ver) { in hisi_qm_get_hw_info()
790 val = readl(qm->io_base + info_table[index].offset); in hisi_qm_get_hw_info()
796 static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits, in qm_get_xqc_depth() argument
801 depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver); in qm_get_xqc_depth()
806 int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs, in hisi_qm_set_algs() argument
809 struct device *dev = &qm->pdev->dev; in hisi_qm_set_algs()
813 if (!qm->uacce) in hisi_qm_set_algs()
833 qm->uacce->algs = algs; in hisi_qm_set_algs()
840 static u32 qm_get_irq_num(struct hisi_qm *qm) in qm_get_irq_num() argument
842 if (qm->fun_type == QM_HW_PF) in qm_get_irq_num()
843 return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver); in qm_get_irq_num()
845 return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver); in qm_get_irq_num()
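
hisi_qm_get_hw_info() is a table lookup: for older hardware it returns the row's fixed value, otherwise it reads the register at the row's offset and masks it according to cap_ver. qm_get_xqc_depth() above shows the common pattern of splitting one capability word into two depths; a hedged sketch of that pattern (the mask/shift names are assumptions taken from that helper):

/* Sketch: one capability word packs the SQ depth in the low half and
 * the CQ depth in the high half.
 */
u32 depth = hisi_qm_get_hw_info(qm, qm_basic_info, QM_QP_DEPTH_CAP, qm->cap_ver);
u16 sq_depth = depth & QM_XQ_DEPTH_MASK;
u16 cq_depth = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
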
848 static int qm_pm_get_sync(struct hisi_qm *qm) in qm_pm_get_sync() argument
850 struct device *dev = &qm->pdev->dev; in qm_pm_get_sync()
853 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) in qm_pm_get_sync()
865 static void qm_pm_put_sync(struct hisi_qm *qm) in qm_pm_put_sync() argument
867 struct device *dev = &qm->pdev->dev; in qm_pm_put_sync()
869 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) in qm_pm_put_sync()
889 struct hisi_qm *qm = qp->qm; in qm_poll_req_cb() local
893 qp->req_cb(qp, qp->sqe + qm->sqe_size * in qm_poll_req_cb()
897 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, in qm_poll_req_cb()
905 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1); in qm_poll_req_cb()
912 struct hisi_qm *qm = poll_data->qm; in qm_work_process() local
918 qp = &qm->qp_array[poll_data->qp_finish_id[i]]; in qm_work_process()
932 static void qm_get_complete_eqe_num(struct hisi_qm *qm) in qm_get_complete_eqe_num() argument
934 struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; in qm_get_complete_eqe_num()
936 u16 eq_depth = qm->eq_depth; in qm_get_complete_eqe_num()
939 if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) { in qm_get_complete_eqe_num()
940 atomic64_inc(&qm->debug.dfx.err_irq_cnt); in qm_get_complete_eqe_num()
941 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_get_complete_eqe_num()
946 if (unlikely(cqn >= qm->qp_num)) in qm_get_complete_eqe_num()
948 poll_data = &qm->poll_data[cqn]; in qm_get_complete_eqe_num()
950 while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { in qm_get_complete_eqe_num()
955 if (qm->status.eq_head == eq_depth - 1) { in qm_get_complete_eqe_num()
956 qm->status.eqc_phase = !qm->status.eqc_phase; in qm_get_complete_eqe_num()
957 eqe = qm->eqe; in qm_get_complete_eqe_num()
958 qm->status.eq_head = 0; in qm_get_complete_eqe_num()
961 qm->status.eq_head++; in qm_get_complete_eqe_num()
969 queue_work(qm->wq, &poll_data->work); in qm_get_complete_eqe_num()
970 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_get_complete_eqe_num()
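
qm_get_complete_eqe_num() walks the event queue with a phase bit: an EQE belongs to software only while QM_EQE_PHASE(eqe) equals status.eqc_phase, and the phase flips whenever the head wraps, so entries left over from the previous lap are ignored. qm_aeq_thread() below uses the same pattern. Distilled into a sketch:

/* Phase-bit ring walk (sketch of the loop above): consume entries
 * while their phase matches ours, flip our phase on wrap, then move
 * the hardware's EQ head pointer with a doorbell.
 */
while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
	/* handle eqe (the low bits of dw0 carry the cq number) */
	if (qm->status.eq_head == qm->eq_depth - 1) {
		qm->status.eqc_phase = !qm->status.eqc_phase;
		eqe = qm->eqe;
		qm->status.eq_head = 0;
	} else {
		eqe++;
		qm->status.eq_head++;
	}
}
qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
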
975 struct hisi_qm *qm = data; in qm_eq_irq() local
978 qm_get_complete_eqe_num(qm); in qm_eq_irq()
985 struct hisi_qm *qm = data; in qm_mb_cmd_irq() local
988 val = readl(qm->io_base + QM_IFC_INT_STATUS); in qm_mb_cmd_irq()
993 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) { in qm_mb_cmd_irq()
994 dev_warn(&qm->pdev->dev, "Driver is down, message cannot be processed!\n"); in qm_mb_cmd_irq()
998 schedule_work(&qm->cmd_process); in qm_mb_cmd_irq()
1017 static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id) in qm_disable_qp() argument
1019 struct hisi_qp *qp = &qm->qp_array[qp_id]; in qm_disable_qp()
1026 static void qm_reset_function(struct hisi_qm *qm) in qm_reset_function() argument
1028 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); in qm_reset_function()
1029 struct device *dev = &qm->pdev->dev; in qm_reset_function()
1035 ret = qm_reset_prepare_ready(qm); in qm_reset_function()
1041 ret = hisi_qm_stop(qm, QM_DOWN); in qm_reset_function()
1043 dev_err(dev, "failed to stop qm when reset function\n"); in qm_reset_function()
1047 ret = hisi_qm_start(qm); in qm_reset_function()
1049 dev_err(dev, "failed to start qm when reset function\n"); in qm_reset_function()
1052 qm_reset_bit_clear(qm); in qm_reset_function()
1057 struct hisi_qm *qm = data; in qm_aeq_thread() local
1058 struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head; in qm_aeq_thread()
1059 u16 aeq_depth = qm->aeq_depth; in qm_aeq_thread()
1062 atomic64_inc(&qm->debug.dfx.aeq_irq_cnt); in qm_aeq_thread()
1064 while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) { in qm_aeq_thread()
1070 dev_err(&qm->pdev->dev, "eq overflow, reset function\n"); in qm_aeq_thread()
1071 qm_reset_function(qm); in qm_aeq_thread()
1074 dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n", in qm_aeq_thread()
1078 qm_disable_qp(qm, qp_id); in qm_aeq_thread()
1081 dev_err(&qm->pdev->dev, "unknown error type %u\n", in qm_aeq_thread()
1086 if (qm->status.aeq_head == aeq_depth - 1) { in qm_aeq_thread()
1087 qm->status.aeqc_phase = !qm->status.aeqc_phase; in qm_aeq_thread()
1088 aeqe = qm->aeqe; in qm_aeq_thread()
1089 qm->status.aeq_head = 0; in qm_aeq_thread()
1092 qm->status.aeq_head++; in qm_aeq_thread()
1096 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); in qm_aeq_thread()
1111 static void qm_init_prefetch(struct hisi_qm *qm) in qm_init_prefetch() argument
1113 struct device *dev = &qm->pdev->dev; in qm_init_prefetch()
1116 if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) in qm_init_prefetch()
1134 writel(page_type, qm->io_base + QM_PAGE_SIZE); in qm_init_prefetch()
1204 static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base, in qm_vft_data_cfg() argument
1212 if (qm->ver == QM_HW_V1) { in qm_vft_data_cfg()
1225 if (qm->ver == QM_HW_V1) { in qm_vft_data_cfg()
1246 writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L); in qm_vft_data_cfg()
1247 writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H); in qm_vft_data_cfg()
1250 static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type, in qm_set_vft_common() argument
1257 if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) in qm_set_vft_common()
1258 factor = &qm->factor[fun_num]; in qm_set_vft_common()
1260 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_set_vft_common()
1266 writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR); in qm_set_vft_common()
1267 writel(type, qm->io_base + QM_VFT_CFG_TYPE); in qm_set_vft_common()
1271 writel(fun_num, qm->io_base + QM_VFT_CFG); in qm_set_vft_common()
1273 qm_vft_data_cfg(qm, type, base, number, factor); in qm_set_vft_common()
1275 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); in qm_set_vft_common()
1276 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); in qm_set_vft_common()
1278 return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_set_vft_common()
1283 static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num) in qm_shaper_init_vft() argument
1285 u32 qos = qm->factor[fun_num].func_qos; in qm_shaper_init_vft()
1288 ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]); in qm_shaper_init_vft()
1290 dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n"); in qm_shaper_init_vft()
1293 writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG); in qm_shaper_init_vft()
1296 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1); in qm_shaper_init_vft()
1305 static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base, in qm_set_sqc_cqc_vft() argument
1311 ret = qm_set_vft_common(qm, i, fun_num, base, number); in qm_set_sqc_cqc_vft()
1317 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { in qm_set_sqc_cqc_vft()
1318 ret = qm_shaper_init_vft(qm, fun_num); in qm_set_sqc_cqc_vft()
1326 qm_set_vft_common(qm, i, fun_num, 0, 0); in qm_set_sqc_cqc_vft()
1331 static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number) in qm_get_vft_v2() argument
1336 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1); in qm_get_vft_v2()
1340 sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | in qm_get_vft_v2()
1341 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); in qm_get_vft_v2()
1349 void *hisi_qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size, in hisi_qm_ctx_alloc() argument
1352 struct device *dev = &qm->pdev->dev; in hisi_qm_ctx_alloc()
1369 void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size, in hisi_qm_ctx_free() argument
1372 struct device *dev = &qm->pdev->dev; in hisi_qm_ctx_free()
1378 static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) in qm_dump_sqc_raw() argument
1380 return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1); in qm_dump_sqc_raw()
1383 static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) in qm_dump_cqc_raw() argument
1385 return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1); in qm_dump_cqc_raw()
1388 static void qm_hw_error_init_v1(struct hisi_qm *qm) in qm_hw_error_init_v1() argument
1390 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v1()
1393 static void qm_hw_error_cfg(struct hisi_qm *qm) in qm_hw_error_cfg() argument
1395 struct hisi_qm_err_info *err_info = &qm->err_info; in qm_hw_error_cfg()
1397 qm->error_mask = err_info->nfe | err_info->ce | err_info->fe; in qm_hw_error_cfg()
1398 /* clear QM hw residual error source */ in qm_hw_error_cfg()
1399 writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE); in qm_hw_error_cfg()
1402 writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE); in qm_hw_error_cfg()
1403 writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD); in qm_hw_error_cfg()
1404 writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE); in qm_hw_error_cfg()
1405 writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE); in qm_hw_error_cfg()
1408 static void qm_hw_error_init_v2(struct hisi_qm *qm) in qm_hw_error_init_v2() argument
1412 qm_hw_error_cfg(qm); in qm_hw_error_init_v2()
1414 irq_unmask = ~qm->error_mask; in qm_hw_error_init_v2()
1415 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v2()
1416 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v2()
1419 static void qm_hw_error_uninit_v2(struct hisi_qm *qm) in qm_hw_error_uninit_v2() argument
1421 u32 irq_mask = qm->error_mask; in qm_hw_error_uninit_v2()
1423 irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v2()
1424 writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v2()
1427 static void qm_hw_error_init_v3(struct hisi_qm *qm) in qm_hw_error_init_v3() argument
1431 qm_hw_error_cfg(qm); in qm_hw_error_init_v3()
1434 writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL); in qm_hw_error_init_v3()
1436 irq_unmask = ~qm->error_mask; in qm_hw_error_init_v3()
1437 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v3()
1438 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v3()
1441 static void qm_hw_error_uninit_v3(struct hisi_qm *qm) in qm_hw_error_uninit_v3() argument
1443 u32 irq_mask = qm->error_mask; in qm_hw_error_uninit_v3()
1445 irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v3()
1446 writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v3()
1449 writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL); in qm_hw_error_uninit_v3()
1452 static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status) in qm_log_hw_error() argument
1455 struct device *dev = &qm->pdev->dev; in qm_log_hw_error()
1468 reg_val = readl(qm->io_base + QM_ABNORMAL_INF01); in qm_log_hw_error()
1472 dev_err(dev, "qm %s doorbell timeout in function %u\n", in qm_log_hw_error()
1475 reg_val = readl(qm->io_base + QM_ABNORMAL_INF00); in qm_log_hw_error()
1481 dev_err(dev, "qm %s fifo overflow in function %u\n", in qm_log_hw_error()
1489 static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm) in qm_hw_error_handle_v2() argument
1494 tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); in qm_hw_error_handle_v2()
1495 error_status = qm->error_mask & tmp; in qm_hw_error_handle_v2()
1499 qm->err_status.is_qm_ecc_mbit = true; in qm_hw_error_handle_v2()
1501 qm_log_hw_error(qm, error_status); in qm_hw_error_handle_v2()
1502 if (error_status & qm->err_info.qm_reset_mask) in qm_hw_error_handle_v2()
1505 writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); in qm_hw_error_handle_v2()
1506 writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE); in qm_hw_error_handle_v2()
1512 static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num) in qm_get_mb_cmd() argument
1518 mutex_lock(&qm->mailbox_lock); in qm_get_mb_cmd()
1519 ret = qm_mb_nolock(qm, &mailbox); in qm_get_mb_cmd()
1523 *msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | in qm_get_mb_cmd()
1524 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); in qm_get_mb_cmd()
1527 mutex_unlock(&qm->mailbox_lock); in qm_get_mb_cmd()
1531 static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask) in qm_clear_cmd_interrupt() argument
1535 if (qm->fun_type == QM_HW_PF) in qm_clear_cmd_interrupt()
1536 writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P); in qm_clear_cmd_interrupt()
1538 val = readl(qm->io_base + QM_IFC_INT_SOURCE_V); in qm_clear_cmd_interrupt()
1540 writel(val, qm->io_base + QM_IFC_INT_SOURCE_V); in qm_clear_cmd_interrupt()
1543 static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id) in qm_handle_vf_msg() argument
1545 struct device *dev = &qm->pdev->dev; in qm_handle_vf_msg()
1550 ret = qm_get_mb_cmd(qm, &msg, vf_id); in qm_handle_vf_msg()
1573 static int qm_wait_vf_prepare_finish(struct hisi_qm *qm) in qm_wait_vf_prepare_finish() argument
1575 struct device *dev = &qm->pdev->dev; in qm_wait_vf_prepare_finish()
1576 u32 vfs_num = qm->vfs_num; in qm_wait_vf_prepare_finish()
1582 if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in qm_wait_vf_prepare_finish()
1586 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P); in qm_wait_vf_prepare_finish()
1602 qm_handle_vf_msg(qm, i); in qm_wait_vf_prepare_finish()
1608 qm_clear_cmd_interrupt(qm, val); in qm_wait_vf_prepare_finish()
1613 static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num) in qm_trigger_vf_interrupt() argument
1617 val = readl(qm->io_base + QM_IFC_INT_CFG); in qm_trigger_vf_interrupt()
1620 writel(val, qm->io_base + QM_IFC_INT_CFG); in qm_trigger_vf_interrupt()
1622 val = readl(qm->io_base + QM_IFC_INT_SET_P); in qm_trigger_vf_interrupt()
1624 writel(val, qm->io_base + QM_IFC_INT_SET_P); in qm_trigger_vf_interrupt()
1627 static void qm_trigger_pf_interrupt(struct hisi_qm *qm) in qm_trigger_pf_interrupt() argument
1631 val = readl(qm->io_base + QM_IFC_INT_SET_V); in qm_trigger_pf_interrupt()
1633 writel(val, qm->io_base + QM_IFC_INT_SET_V); in qm_trigger_pf_interrupt()
1636 static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num) in qm_ping_single_vf() argument
1638 struct device *dev = &qm->pdev->dev; in qm_ping_single_vf()
1645 mutex_lock(&qm->mailbox_lock); in qm_ping_single_vf()
1646 ret = qm_mb_nolock(qm, &mailbox); in qm_ping_single_vf()
1652 qm_trigger_vf_interrupt(qm, fun_num); in qm_ping_single_vf()
1655 val = readq(qm->io_base + QM_IFC_READY_STATUS); in qm_ping_single_vf()
1668 mutex_unlock(&qm->mailbox_lock); in qm_ping_single_vf()
1672 static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd) in qm_ping_all_vfs() argument
1674 struct device *dev = &qm->pdev->dev; in qm_ping_all_vfs()
1675 u32 vfs_num = qm->vfs_num; in qm_ping_all_vfs()
1683 mutex_lock(&qm->mailbox_lock); in qm_ping_all_vfs()
1685 ret = qm_mb_nolock(qm, &mailbox); in qm_ping_all_vfs()
1688 mutex_unlock(&qm->mailbox_lock); in qm_ping_all_vfs()
1692 qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS); in qm_ping_all_vfs()
1695 val = readq(qm->io_base + QM_IFC_READY_STATUS); in qm_ping_all_vfs()
1698 mutex_unlock(&qm->mailbox_lock); in qm_ping_all_vfs()
1706 mutex_unlock(&qm->mailbox_lock); in qm_ping_all_vfs()
1717 static int qm_ping_pf(struct hisi_qm *qm, u64 cmd) in qm_ping_pf() argument
1725 mutex_lock(&qm->mailbox_lock); in qm_ping_pf()
1726 ret = qm_mb_nolock(qm, &mailbox); in qm_ping_pf()
1728 dev_err(&qm->pdev->dev, "failed to send command to PF!\n"); in qm_ping_pf()
1732 qm_trigger_pf_interrupt(qm); in qm_ping_pf()
1736 val = readl(qm->io_base + QM_IFC_INT_SET_V); in qm_ping_pf()
1747 mutex_unlock(&qm->mailbox_lock); in qm_ping_pf()
1753 return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0); in qm_stop_qp()
1756 static int qm_set_msi(struct hisi_qm *qm, bool set) in qm_set_msi() argument
1758 struct pci_dev *pdev = qm->pdev; in qm_set_msi()
1766 if (qm->err_status.is_qm_ecc_mbit || in qm_set_msi()
1767 qm->err_status.is_dev_ecc_mbit) in qm_set_msi()
1771 if (readl(qm->io_base + QM_PEH_DFX_INFO0)) in qm_set_msi()
1778 static void qm_wait_msi_finish(struct hisi_qm *qm) in qm_wait_msi_finish() argument
1780 struct pci_dev *pdev = qm->pdev; in qm_wait_msi_finish()
1800 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0, in qm_wait_msi_finish()
1806 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1, in qm_wait_msi_finish()
1813 static int qm_set_msi_v3(struct hisi_qm *qm, bool set) in qm_set_msi_v3() argument
1815 struct pci_dev *pdev = qm->pdev; in qm_set_msi_v3()
1836 qm_wait_msi_finish(qm); in qm_set_msi_v3()
1875 return qp->sqe + sq_tail * qp->qm->sqe_size; in qm_get_avail_sqe()
1887 static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) in qm_create_qp_nolock() argument
1889 struct device *dev = &qm->pdev->dev; in qm_create_qp_nolock()
1893 if (!qm_qp_avail_state(qm, NULL, QP_INIT)) in qm_create_qp_nolock()
1896 if (qm->qp_in_used == qm->qp_num) { in qm_create_qp_nolock()
1897 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n", in qm_create_qp_nolock()
1898 qm->qp_num); in qm_create_qp_nolock()
1899 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); in qm_create_qp_nolock()
1903 qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC); in qm_create_qp_nolock()
1905 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n", in qm_create_qp_nolock()
1906 qm->qp_num); in qm_create_qp_nolock()
1907 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); in qm_create_qp_nolock()
1911 qp = &qm->qp_array[qp_id]; in qm_create_qp_nolock()
1920 qm->qp_in_used++; in qm_create_qp_nolock()
1927 * hisi_qm_create_qp() - Create a queue pair from qm.
1928 * @qm: The qm we create a qp from.
1933 static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) in hisi_qm_create_qp() argument
1938 ret = qm_pm_get_sync(qm); in hisi_qm_create_qp()
1942 down_write(&qm->qps_lock); in hisi_qm_create_qp()
1943 qp = qm_create_qp_nolock(qm, alg_type); in hisi_qm_create_qp()
1944 up_write(&qm->qps_lock); in hisi_qm_create_qp()
1947 qm_pm_put_sync(qm); in hisi_qm_create_qp()
1953 * hisi_qm_release_qp() - Release a qp back to its qm.
1960 struct hisi_qm *qm = qp->qm; in hisi_qm_release_qp() local
1962 down_write(&qm->qps_lock); in hisi_qm_release_qp()
1964 if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) { in hisi_qm_release_qp()
1965 up_write(&qm->qps_lock); in hisi_qm_release_qp()
1969 qm->qp_in_used--; in hisi_qm_release_qp()
1970 idr_remove(&qm->qp_idr, qp->qp_id); in hisi_qm_release_qp()
1972 up_write(&qm->qps_lock); in hisi_qm_release_qp()
1974 qm_pm_put_sync(qm); in hisi_qm_release_qp()
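
Taken together, the matches above define the qp lifecycle: creation allocates an id from qp_idr under qps_lock, start programs the SQC/CQC through the mailbox, and release returns the id. A hedged sketch of a kernel-mode user (alg_type and pasid values are placeholders, not taken from the matches):

/* Sketch of the qp lifecycle exposed above; error handling trimmed. */
static int example_qp_round_trip(struct hisi_qm *qm)
{
	struct hisi_qp *qp;
	int ret;

	qp = hisi_qm_create_qp(qm, 0 /* alg_type, device-specific */);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	ret = hisi_qm_start_qp(qp, 0 /* pasid; 0 for in-kernel users */);
	if (ret)
		goto out;

	/* ... hisi_qp_send() requests; completions arrive via qp->req_cb ... */

	hisi_qm_stop_qp(qp);
out:
	hisi_qm_release_qp(qp);
	return ret;
}
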
1979 struct hisi_qm *qm = qp->qm; in qm_sq_ctx_cfg() local
1980 struct device *dev = &qm->pdev->dev; in qm_sq_ctx_cfg()
1981 enum qm_hw_ver ver = qm->ver; in qm_sq_ctx_cfg()
1992 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size)); in qm_sq_ctx_cfg()
1995 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth)); in qm_sq_ctx_cfg()
2001 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) in qm_sq_ctx_cfg()
2012 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0); in qm_sq_ctx_cfg()
2021 struct hisi_qm *qm = qp->qm; in qm_cq_ctx_cfg() local
2022 struct device *dev = &qm->pdev->dev; in qm_cq_ctx_cfg()
2023 enum qm_hw_ver ver = qm->ver; in qm_cq_ctx_cfg()
2043 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) in qm_cq_ctx_cfg()
2053 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0); in qm_cq_ctx_cfg()
2075 struct hisi_qm *qm = qp->qm; in qm_start_qp_nolock() local
2076 struct device *dev = &qm->pdev->dev; in qm_start_qp_nolock()
2081 if (!qm_qp_avail_state(qm, qp, QP_START)) in qm_start_qp_nolock()
2104 struct hisi_qm *qm = qp->qm; in hisi_qm_start_qp() local
2107 down_write(&qm->qps_lock); in hisi_qm_start_qp()
2109 up_write(&qm->qps_lock); in hisi_qm_start_qp()
2127 struct hisi_qm *qm = qp->qm; in qp_stop_fail_cb() local
2133 qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos)); in qp_stop_fail_cb()
2148 struct hisi_qm *qm = qp->qm; in qm_drain_qp() local
2149 struct device *dev = &qm->pdev->dev; in qm_drain_qp()
2157 if (qm_check_dev_error(qm)) in qm_drain_qp()
2161 if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) { in qm_drain_qp()
2168 addr = hisi_qm_ctx_alloc(qm, size, &dma_addr); in qm_drain_qp()
2175 ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id); in qm_drain_qp()
2182 ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)), in qm_drain_qp()
2203 hisi_qm_ctx_free(qm, size, addr, &dma_addr); in qm_drain_qp()
2210 struct device *dev = &qp->qm->pdev->dev; in qm_stop_qp_nolock()
2224 if (!qm_qp_avail_state(qp->qm, qp, QP_STOP)) in qm_stop_qp_nolock()
2234 flush_workqueue(qp->qm->wq); in qm_stop_qp_nolock()
2244 * hisi_qm_stop_qp() - Stop a qp in qm.
2253 down_write(&qp->qm->qps_lock); in hisi_qm_stop_qp()
2255 up_write(&qp->qm->qps_lock); in hisi_qm_stop_qp()
2267 * if the qm that this qp belongs to is resetting.
2272 * causes the current qm_db send to fail, or the sent sqe may not be received. QM
2284 atomic_read(&qp->qm->status.flags) == QM_STOP || in hisi_qp_send()
2286 dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n"); in hisi_qp_send()
2293 memcpy(sqe, msg, qp->qm->sqe_size); in hisi_qp_send()
2295 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0); in hisi_qp_send()
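
hisi_qp_send() is the submit path: it refuses work while the qp or qm is stopped or resetting, copies one sqe_size-byte descriptor into the SQ slot at sq_tail, and rings the SQ doorbell. The caller side, as a sketch:

/* Sketch: 'msg' must point at exactly qp->qm->sqe_size bytes of a
 * device-specific sqe (an assumption here); a nonzero return means
 * the ring is full or the qp/qm is stopping, so callers back off and
 * retry rather than treat it as fatal.
 */
static int example_submit(struct hisi_qp *qp, const void *msg)
{
	return hisi_qp_send(qp, msg);
}
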
2303 static void hisi_qm_cache_wb(struct hisi_qm *qm) in hisi_qm_cache_wb() argument
2307 if (qm->ver == QM_HW_V1) in hisi_qm_cache_wb()
2310 writel(0x1, qm->io_base + QM_CACHE_WB_START); in hisi_qm_cache_wb()
2311 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, in hisi_qm_cache_wb()
2314 dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n"); in hisi_qm_cache_wb()
2322 /* This function returns the number of free qps in the qm. */
2325 struct hisi_qm *qm = uacce->priv; in hisi_qm_get_available_instances() local
2328 down_read(&qm->qps_lock); in hisi_qm_get_available_instances()
2329 ret = qm->qp_num - qm->qp_in_used; in hisi_qm_get_available_instances()
2330 up_read(&qm->qps_lock); in hisi_qm_get_available_instances()
2335 static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset) in hisi_qm_set_hw_reset() argument
2339 for (i = 0; i < qm->qp_num; i++) in hisi_qm_set_hw_reset()
2340 qm_set_qp_disable(&qm->qp_array[i], offset); in hisi_qm_set_hw_reset()
2347 struct hisi_qm *qm = uacce->priv; in hisi_qm_uacce_get_queue() local
2351 qp = hisi_qm_create_qp(qm, alg_type); in hisi_qm_uacce_get_queue()
2378 struct hisi_qm *qm = qp->qm; in hisi_qm_uacce_mmap() local
2379 resource_size_t phys_base = qm->db_phys_base + in hisi_qm_uacce_mmap()
2380 qp->qp_id * qm->db_interval; in hisi_qm_uacce_mmap()
2382 struct pci_dev *pdev = qm->pdev; in hisi_qm_uacce_mmap()
2389 if (qm->ver == QM_HW_V1) { in hisi_qm_uacce_mmap()
2392 } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { in hisi_qm_uacce_mmap()
2397 if (sz > qm->db_interval) in hisi_qm_uacce_mmap()
2457 struct hisi_qm *qm = q->uacce->priv; in qm_set_sqctype() local
2460 down_write(&qm->qps_lock); in qm_set_sqctype()
2462 up_write(&qm->qps_lock); in qm_set_sqctype()
2493 qp_info.sqe_size = qp->qm->sqe_size; in hisi_qm_uacce_ioctl()
2510 * @qm: the qm bound to the uacce device
2512 static int qm_hw_err_isolate(struct hisi_qm *qm) in qm_hw_err_isolate() argument
2518 isolate = &qm->isolate_data; in qm_hw_err_isolate()
2523 if (qm->uacce->is_vf || isolate->is_isolate || !isolate->err_threshold) in qm_hw_err_isolate()
2556 static void qm_hw_err_destroy(struct hisi_qm *qm) in qm_hw_err_destroy() argument
2560 mutex_lock(&qm->isolate_data.isolate_lock); in qm_hw_err_destroy()
2561 list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) { in qm_hw_err_destroy()
2565 mutex_unlock(&qm->isolate_data.isolate_lock); in qm_hw_err_destroy()
2570 struct hisi_qm *qm = uacce->priv; in hisi_qm_get_isolate_state() local
2574 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); in hisi_qm_get_isolate_state()
2576 pf_qm = qm; in hisi_qm_get_isolate_state()
2584 struct hisi_qm *qm = uacce->priv; in hisi_qm_isolate_threshold_write() local
2590 if (qm->isolate_data.is_isolate) in hisi_qm_isolate_threshold_write()
2593 qm->isolate_data.err_threshold = num; in hisi_qm_isolate_threshold_write()
2596 qm_hw_err_destroy(qm); in hisi_qm_isolate_threshold_write()
2603 struct hisi_qm *qm = uacce->priv; in hisi_qm_isolate_threshold_read() local
2607 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); in hisi_qm_isolate_threshold_read()
2611 return qm->isolate_data.err_threshold; in hisi_qm_isolate_threshold_read()
2628 static void qm_remove_uacce(struct hisi_qm *qm) in qm_remove_uacce() argument
2630 struct uacce_device *uacce = qm->uacce; in qm_remove_uacce()
2632 if (qm->use_sva) { in qm_remove_uacce()
2633 qm_hw_err_destroy(qm); in qm_remove_uacce()
2635 qm->uacce = NULL; in qm_remove_uacce()
2639 static int qm_alloc_uacce(struct hisi_qm *qm) in qm_alloc_uacce() argument
2641 struct pci_dev *pdev = qm->pdev; in qm_alloc_uacce()
2662 qm->use_sva = true; in qm_alloc_uacce()
2665 qm_remove_uacce(qm); in qm_alloc_uacce()
2670 uacce->priv = qm; in qm_alloc_uacce()
2672 if (qm->ver == QM_HW_V1) in qm_alloc_uacce()
2674 else if (qm->ver == QM_HW_V2) in qm_alloc_uacce()
2679 if (qm->ver == QM_HW_V1) in qm_alloc_uacce()
2681 else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) in qm_alloc_uacce()
2685 mmio_page_nr = qm->db_interval / PAGE_SIZE; in qm_alloc_uacce()
2687 qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP); in qm_alloc_uacce()
2690 dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth + in qm_alloc_uacce()
2697 qm->uacce = uacce; in qm_alloc_uacce()
2698 INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs); in qm_alloc_uacce()
2699 mutex_init(&qm->isolate_data.isolate_lock); in qm_alloc_uacce()
2705 * qm_frozen() - Try to freeze the QM to cut off continuous queue requests. If
2706 * there are users on the QM, return failure without doing anything.
2707 * @qm: The qm to be frozen.
2709 * This function freezes the QM, then we can do SR-IOV disabling.
2711 static int qm_frozen(struct hisi_qm *qm) in qm_frozen() argument
2713 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) in qm_frozen()
2716 down_write(&qm->qps_lock); in qm_frozen()
2718 if (!qm->qp_in_used) { in qm_frozen()
2719 qm->qp_in_used = qm->qp_num; in qm_frozen()
2720 up_write(&qm->qps_lock); in qm_frozen()
2721 set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl); in qm_frozen()
2725 up_write(&qm->qps_lock); in qm_frozen()
2733 struct hisi_qm *qm, *vf_qm; in qm_try_frozen_vfs() local
2742 list_for_each_entry(qm, &qm_list->list, list) { in qm_try_frozen_vfs()
2743 dev = qm->pdev; in qm_try_frozen_vfs()
2763 * @qm: The qm whose tasks we wait to finish.
2766 void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list) in hisi_qm_wait_task_finish() argument
2768 while (qm_frozen(qm) || in hisi_qm_wait_task_finish()
2769 ((qm->fun_type == QM_HW_PF) && in hisi_qm_wait_task_finish()
2770 qm_try_frozen_vfs(qm->pdev, qm_list))) { in hisi_qm_wait_task_finish()
2774 while (test_bit(QM_RST_SCHED, &qm->misc_ctl) || in hisi_qm_wait_task_finish()
2775 test_bit(QM_RESETTING, &qm->misc_ctl)) in hisi_qm_wait_task_finish()
2778 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in hisi_qm_wait_task_finish()
2779 flush_work(&qm->cmd_process); in hisi_qm_wait_task_finish()
2785 static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num) in hisi_qp_memory_uninit() argument
2787 struct device *dev = &qm->pdev->dev; in hisi_qp_memory_uninit()
2792 qdma = &qm->qp_array[i].qdma; in hisi_qp_memory_uninit()
2794 kfree(qm->poll_data[i].qp_finish_id); in hisi_qp_memory_uninit()
2797 kfree(qm->poll_data); in hisi_qp_memory_uninit()
2798 kfree(qm->qp_array); in hisi_qp_memory_uninit()
2801 static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id, in hisi_qp_memory_init() argument
2804 struct device *dev = &qm->pdev->dev; in hisi_qp_memory_init()
2805 size_t off = qm->sqe_size * sq_depth; in hisi_qp_memory_init()
2809 qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16), in hisi_qp_memory_init()
2811 if (!qm->poll_data[id].qp_finish_id) in hisi_qp_memory_init()
2814 qp = &qm->qp_array[id]; in hisi_qp_memory_init()
2827 qp->qm = qm; in hisi_qp_memory_init()
2833 kfree(qm->poll_data[id].qp_finish_id); in hisi_qp_memory_init()
2837 static void hisi_qm_pre_init(struct hisi_qm *qm) in hisi_qm_pre_init() argument
2839 struct pci_dev *pdev = qm->pdev; in hisi_qm_pre_init()
2841 if (qm->ver == QM_HW_V1) in hisi_qm_pre_init()
2842 qm->ops = &qm_hw_ops_v1; in hisi_qm_pre_init()
2843 else if (qm->ver == QM_HW_V2) in hisi_qm_pre_init()
2844 qm->ops = &qm_hw_ops_v2; in hisi_qm_pre_init()
2846 qm->ops = &qm_hw_ops_v3; in hisi_qm_pre_init()
2848 pci_set_drvdata(pdev, qm); in hisi_qm_pre_init()
2849 mutex_init(&qm->mailbox_lock); in hisi_qm_pre_init()
2850 init_rwsem(&qm->qps_lock); in hisi_qm_pre_init()
2851 qm->qp_in_used = 0; in hisi_qm_pre_init()
2852 if (test_bit(QM_SUPPORT_RPM, &qm->caps)) { in hisi_qm_pre_init()
2858 static void qm_cmd_uninit(struct hisi_qm *qm) in qm_cmd_uninit() argument
2862 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in qm_cmd_uninit()
2865 val = readl(qm->io_base + QM_IFC_INT_MASK); in qm_cmd_uninit()
2867 writel(val, qm->io_base + QM_IFC_INT_MASK); in qm_cmd_uninit()
2870 static void qm_cmd_init(struct hisi_qm *qm) in qm_cmd_init() argument
2874 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in qm_cmd_init()
2878 qm_clear_cmd_interrupt(qm, QM_IFC_INT_SOURCE_CLR); in qm_cmd_init()
2881 val = readl(qm->io_base + QM_IFC_INT_MASK); in qm_cmd_init()
2883 writel(val, qm->io_base + QM_IFC_INT_MASK); in qm_cmd_init()
2886 static void qm_put_pci_res(struct hisi_qm *qm) in qm_put_pci_res() argument
2888 struct pci_dev *pdev = qm->pdev; in qm_put_pci_res()
2890 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) in qm_put_pci_res()
2891 iounmap(qm->db_io_base); in qm_put_pci_res()
2893 iounmap(qm->io_base); in qm_put_pci_res()
2897 static void hisi_qm_pci_uninit(struct hisi_qm *qm) in hisi_qm_pci_uninit() argument
2899 struct pci_dev *pdev = qm->pdev; in hisi_qm_pci_uninit()
2902 qm_put_pci_res(qm); in hisi_qm_pci_uninit()
2906 static void hisi_qm_set_state(struct hisi_qm *qm, u8 state) in hisi_qm_set_state() argument
2908 if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF) in hisi_qm_set_state()
2909 writel(state, qm->io_base + QM_VF_STATE); in hisi_qm_set_state()
2912 static void hisi_qm_unint_work(struct hisi_qm *qm) in hisi_qm_unint_work() argument
2914 destroy_workqueue(qm->wq); in hisi_qm_unint_work()
2917 static void hisi_qm_memory_uninit(struct hisi_qm *qm) in hisi_qm_memory_uninit() argument
2919 struct device *dev = &qm->pdev->dev; in hisi_qm_memory_uninit()
2921 hisi_qp_memory_uninit(qm, qm->qp_num); in hisi_qm_memory_uninit()
2922 if (qm->qdma.va) { in hisi_qm_memory_uninit()
2923 hisi_qm_cache_wb(qm); in hisi_qm_memory_uninit()
2924 dma_free_coherent(dev, qm->qdma.size, in hisi_qm_memory_uninit()
2925 qm->qdma.va, qm->qdma.dma); in hisi_qm_memory_uninit()
2928 idr_destroy(&qm->qp_idr); in hisi_qm_memory_uninit()
2930 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) in hisi_qm_memory_uninit()
2931 kfree(qm->factor); in hisi_qm_memory_uninit()
2935 * hisi_qm_uninit() - Uninitialize qm.
2936 * @qm: The qm to be uninitialized.
2938 * This function releases the qm's device resources.
2940 void hisi_qm_uninit(struct hisi_qm *qm) in hisi_qm_uninit() argument
2942 qm_cmd_uninit(qm); in hisi_qm_uninit()
2943 hisi_qm_unint_work(qm); in hisi_qm_uninit()
2944 down_write(&qm->qps_lock); in hisi_qm_uninit()
2946 if (!qm_avail_state(qm, QM_CLOSE)) { in hisi_qm_uninit()
2947 up_write(&qm->qps_lock); in hisi_qm_uninit()
2951 hisi_qm_memory_uninit(qm); in hisi_qm_uninit()
2952 hisi_qm_set_state(qm, QM_NOT_READY); in hisi_qm_uninit()
2953 up_write(&qm->qps_lock); in hisi_qm_uninit()
2955 qm_irqs_unregister(qm); in hisi_qm_uninit()
2956 hisi_qm_pci_uninit(qm); in hisi_qm_uninit()
2957 if (qm->use_sva) { in hisi_qm_uninit()
2958 uacce_remove(qm->uacce); in hisi_qm_uninit()
2959 qm->uacce = NULL; in hisi_qm_uninit()
2965 * hisi_qm_get_vft() - Get vft from a qm.
2966 * @qm: The qm we want to get its vft.
2970 * We can allocate multiple queues to a qm by configuring virtual function
2974 * qm hw v1 does not support this interface.
2976 static int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number) in hisi_qm_get_vft() argument
2981 if (!qm->ops->get_vft) { in hisi_qm_get_vft()
2982 dev_err(&qm->pdev->dev, "Don't support vft read!\n"); in hisi_qm_get_vft()
2986 return qm->ops->get_vft(qm, base, number); in hisi_qm_get_vft()
2990 * hisi_qm_set_vft() - Set vft to a qm.
2991 * @qm: The qm we want to set its vft.
2999 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
3000 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
3003 static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, in hisi_qm_set_vft() argument
3006 u32 max_q_num = qm->ctrl_qp_num; in hisi_qm_set_vft()
3012 return qm_set_sqc_cqc_vft(qm, fun_num, base, number); in hisi_qm_set_vft()
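
The comment above already gives the calling convention; spelled out, assigning a contiguous queue range is one vft write per function. A sketch of those examples (the queue numbers are illustrative):

/* Queues 0..63 go to the PF (function 0), queues 64..127 to VF 2,
 * matching the examples in the comment above.
 */
static int example_assign_queues(struct hisi_qm *qm)
{
	int ret;

	ret = hisi_qm_set_vft(qm, 0, 0, 64);
	if (ret)
		return ret;
	return hisi_qm_set_vft(qm, 2, 64, 64);
}
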
3015 static void qm_init_eq_aeq_status(struct hisi_qm *qm) in qm_init_eq_aeq_status() argument
3017 struct hisi_qm_status *status = &qm->status; in qm_init_eq_aeq_status()
3025 static void qm_enable_eq_aeq_interrupts(struct hisi_qm *qm) in qm_enable_eq_aeq_interrupts() argument
3028 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); in qm_enable_eq_aeq_interrupts()
3029 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_enable_eq_aeq_interrupts()
3031 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK); in qm_enable_eq_aeq_interrupts()
3032 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK); in qm_enable_eq_aeq_interrupts()
3035 static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm) in qm_disable_eq_aeq_interrupts() argument
3037 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); in qm_disable_eq_aeq_interrupts()
3038 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); in qm_disable_eq_aeq_interrupts()
3041 static int qm_eq_ctx_cfg(struct hisi_qm *qm) in qm_eq_ctx_cfg() argument
3043 struct device *dev = &qm->pdev->dev; in qm_eq_ctx_cfg()
3052 eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma)); in qm_eq_ctx_cfg()
3053 eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); in qm_eq_ctx_cfg()
3054 if (qm->ver == QM_HW_V1) in qm_eq_ctx_cfg()
3056 eqc->dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); in qm_eq_ctx_cfg()
3065 ret = hisi_qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0); in qm_eq_ctx_cfg()
3072 static int qm_aeq_ctx_cfg(struct hisi_qm *qm) in qm_aeq_ctx_cfg() argument
3074 struct device *dev = &qm->pdev->dev; in qm_aeq_ctx_cfg()
3083 aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); in qm_aeq_ctx_cfg()
3084 aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); in qm_aeq_ctx_cfg()
3085 aeqc->dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); in qm_aeq_ctx_cfg()
3094 ret = hisi_qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0); in qm_aeq_ctx_cfg()
3101 static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm) in qm_eq_aeq_ctx_cfg() argument
3103 struct device *dev = &qm->pdev->dev; in qm_eq_aeq_ctx_cfg()
3106 qm_init_eq_aeq_status(qm); in qm_eq_aeq_ctx_cfg()
3108 ret = qm_eq_ctx_cfg(qm); in qm_eq_aeq_ctx_cfg()
3114 return qm_aeq_ctx_cfg(qm); in qm_eq_aeq_ctx_cfg()
3117 static int __hisi_qm_start(struct hisi_qm *qm) in __hisi_qm_start() argument
3121 WARN_ON(!qm->qdma.va); in __hisi_qm_start()
3123 if (qm->fun_type == QM_HW_PF) { in __hisi_qm_start()
3124 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); in __hisi_qm_start()
3129 ret = qm_eq_aeq_ctx_cfg(qm); in __hisi_qm_start()
3133 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); in __hisi_qm_start()
3137 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); in __hisi_qm_start()
3141 qm_init_prefetch(qm); in __hisi_qm_start()
3142 qm_enable_eq_aeq_interrupts(qm); in __hisi_qm_start()
3148 * hisi_qm_start() - start qm
3149 * @qm: The qm to be started.
3151 * This function starts a qm; qps can then be allocated from it.
3153 int hisi_qm_start(struct hisi_qm *qm) in hisi_qm_start() argument
3155 struct device *dev = &qm->pdev->dev; in hisi_qm_start()
3158 down_write(&qm->qps_lock); in hisi_qm_start()
3160 if (!qm_avail_state(qm, QM_START)) { in hisi_qm_start()
3161 up_write(&qm->qps_lock); in hisi_qm_start()
3165 dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num); in hisi_qm_start()
3167 if (!qm->qp_num) { in hisi_qm_start()
3173 ret = __hisi_qm_start(qm); in hisi_qm_start()
3175 atomic_set(&qm->status.flags, QM_START); in hisi_qm_start()
3177 hisi_qm_set_state(qm, QM_READY); in hisi_qm_start()
3179 up_write(&qm->qps_lock); in hisi_qm_start()
3184 static int qm_restart(struct hisi_qm *qm) in qm_restart() argument
3186 struct device *dev = &qm->pdev->dev; in qm_restart()
3190 ret = hisi_qm_start(qm); in qm_restart()
3194 down_write(&qm->qps_lock); in qm_restart()
3195 for (i = 0; i < qm->qp_num; i++) { in qm_restart()
3196 qp = &qm->qp_array[i]; in qm_restart()
3203 up_write(&qm->qps_lock); in qm_restart()
3209 up_write(&qm->qps_lock); in qm_restart()
3215 static int qm_stop_started_qp(struct hisi_qm *qm) in qm_stop_started_qp() argument
3217 struct device *dev = &qm->pdev->dev; in qm_stop_started_qp()
3221 for (i = 0; i < qm->qp_num; i++) { in qm_stop_started_qp()
3222 qp = &qm->qp_array[i]; in qm_stop_started_qp()
3237 * qm_clear_queues() - Clear all queue memory in a qm.
3238 * @qm: The qm in which the queues will be cleared.
3240 * This function clears all queue memory in a qm. Reset of accelerator can
3243 static void qm_clear_queues(struct hisi_qm *qm) in qm_clear_queues() argument
3248 for (i = 0; i < qm->qp_num; i++) { in qm_clear_queues()
3249 qp = &qm->qp_array[i]; in qm_clear_queues()
3254 memset(qm->qdma.va, 0, qm->qdma.size); in qm_clear_queues()
3258 * hisi_qm_stop() - Stop a qm.
3259 * @qm: The qm which will be stopped.
3260 * @r: The reason to stop qm.
3262 * This function stops the qm and its qps; the qm then cannot accept requests.
3264 * to let qm start again.
3266 int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r) in hisi_qm_stop() argument
3268 struct device *dev = &qm->pdev->dev; in hisi_qm_stop()
3271 down_write(&qm->qps_lock); in hisi_qm_stop()
3273 qm->status.stop_reason = r; in hisi_qm_stop()
3274 if (!qm_avail_state(qm, QM_STOP)) { in hisi_qm_stop()
3279 if (qm->status.stop_reason == QM_SOFT_RESET || in hisi_qm_stop()
3280 qm->status.stop_reason == QM_DOWN) { in hisi_qm_stop()
3281 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); in hisi_qm_stop()
3282 ret = qm_stop_started_qp(qm); in hisi_qm_stop()
3287 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); in hisi_qm_stop()
3290 qm_disable_eq_aeq_interrupts(qm); in hisi_qm_stop()
3291 if (qm->fun_type == QM_HW_PF) { in hisi_qm_stop()
3292 ret = hisi_qm_set_vft(qm, 0, 0, 0); in hisi_qm_stop()
3300 qm_clear_queues(qm); in hisi_qm_stop()
3301 atomic_set(&qm->status.flags, QM_STOP); in hisi_qm_stop()
3304 up_write(&qm->qps_lock); in hisi_qm_stop()
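
hisi_qm_stop() and hisi_qm_start() pair up as the reset primitive: qm_reset_function() near the top of this listing does exactly stop, start, clear. As a sketch of that sequence:

/* Function-level reset as in qm_reset_function() above: the
 * QM_RESETTING bit brackets a stop/start pair.
 */
static int example_function_reset(struct hisi_qm *qm)
{
	int ret;

	ret = qm_reset_prepare_ready(qm);	/* takes QM_RESETTING */
	if (ret)
		return ret;
	ret = hisi_qm_stop(qm, QM_DOWN);
	if (!ret)
		ret = hisi_qm_start(qm);
	qm_reset_bit_clear(qm);			/* releases QM_RESETTING */
	return ret;
}
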
3309 static void qm_hw_error_init(struct hisi_qm *qm) in qm_hw_error_init() argument
3311 if (!qm->ops->hw_error_init) { in qm_hw_error_init()
3312 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n"); in qm_hw_error_init()
3316 qm->ops->hw_error_init(qm); in qm_hw_error_init()
3319 static void qm_hw_error_uninit(struct hisi_qm *qm) in qm_hw_error_uninit() argument
3321 if (!qm->ops->hw_error_uninit) { in qm_hw_error_uninit()
3322 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n"); in qm_hw_error_uninit()
3326 qm->ops->hw_error_uninit(qm); in qm_hw_error_uninit()
3329 static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm) in qm_hw_error_handle() argument
3331 if (!qm->ops->hw_error_handle) { in qm_hw_error_handle()
3332 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n"); in qm_hw_error_handle()
3336 return qm->ops->hw_error_handle(qm); in qm_hw_error_handle()
3341 * @qm: The qm for which we want to do error initialization.
3343 * Initialize QM and device error related configuration.
3345 void hisi_qm_dev_err_init(struct hisi_qm *qm) in hisi_qm_dev_err_init() argument
3347 if (qm->fun_type == QM_HW_VF) in hisi_qm_dev_err_init()
3350 qm_hw_error_init(qm); in hisi_qm_dev_err_init()
3352 if (!qm->err_ini->hw_err_enable) { in hisi_qm_dev_err_init()
3353 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n"); in hisi_qm_dev_err_init()
3356 qm->err_ini->hw_err_enable(qm); in hisi_qm_dev_err_init()
3362 * @qm: The qm for which we want to do error uninitialization.
3364 * Uninitialize QM and device error related configuration.
3366 void hisi_qm_dev_err_uninit(struct hisi_qm *qm) in hisi_qm_dev_err_uninit() argument
3368 if (qm->fun_type == QM_HW_VF) in hisi_qm_dev_err_uninit()
3371 qm_hw_error_uninit(qm); in hisi_qm_dev_err_uninit()
3373 if (!qm->err_ini->hw_err_disable) { in hisi_qm_dev_err_uninit()
3374 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n"); in hisi_qm_dev_err_uninit()
3377 qm->err_ini->hw_err_disable(qm); in hisi_qm_dev_err_uninit()
3412 struct hisi_qm *qm; in hisi_qm_sort_devices() local
3417 list_for_each_entry(qm, &qm_list->list, list) { in hisi_qm_sort_devices()
3418 dev = &qm->pdev->dev; in hisi_qm_sort_devices()
3428 res->qm = qm; in hisi_qm_sort_devices()
3474 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type); in hisi_qm_alloc_qps_node()
3498 static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs) in qm_vf_q_assign() argument
3501 u32 max_qp_num = qm->max_qp_num; in qm_vf_q_assign()
3502 u32 q_base = qm->qp_num; in qm_vf_q_assign()
3508 vfs_q_num = qm->ctrl_qp_num - qm->qp_num; in qm_vf_q_assign()
3533 ret = hisi_qm_set_vft(qm, i, q_base, act_q_num); in qm_vf_q_assign()
3536 hisi_qm_set_vft(qm, j, 0, 0); in qm_vf_q_assign()
3545 static int qm_clear_vft_config(struct hisi_qm *qm) in qm_clear_vft_config() argument
3550 for (i = 1; i <= qm->vfs_num; i++) { in qm_clear_vft_config()
3551 ret = hisi_qm_set_vft(qm, i, 0, 0); in qm_clear_vft_config()
3555 qm->vfs_num = 0; in qm_clear_vft_config()
3560 static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos) in qm_func_shaper_enable() argument
3562 struct device *dev = &qm->pdev->dev; in qm_func_shaper_enable()
3566 total_vfs = pci_sriov_get_totalvfs(qm->pdev); in qm_func_shaper_enable()
3570 qm->factor[fun_index].func_qos = qos; in qm_func_shaper_enable()
3572 ret = qm_get_shaper_para(ir, &qm->factor[fun_index]); in qm_func_shaper_enable()
3580 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1); in qm_func_shaper_enable()
3590 static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index) in qm_get_shaper_vft_qos() argument
3598 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_get_shaper_vft_qos()
3604 writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR); in qm_get_shaper_vft_qos()
3605 writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE); in qm_get_shaper_vft_qos()
3606 writel(fun_index, qm->io_base + QM_VFT_CFG); in qm_get_shaper_vft_qos()
3608 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); in qm_get_shaper_vft_qos()
3609 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); in qm_get_shaper_vft_qos()
3611 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_get_shaper_vft_qos()
3617 shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) | in qm_get_shaper_vft_qos()
3618 ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32); in qm_get_shaper_vft_qos()
3629 ir = qm->factor[fun_index].func_qos * QM_QOS_RATE; in qm_get_shaper_vft_qos()
3633 pci_err(qm->pdev, "error_rate: %u, get function qos is error!\n", error_rate); in qm_get_shaper_vft_qos()
3640 static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num) in qm_vf_get_qos() argument
3642 struct device *dev = &qm->pdev->dev; in qm_vf_get_qos()
3647 qos = qm_get_shaper_vft_qos(qm, fun_num); in qm_vf_get_qos()
3654 ret = qm_ping_single_vf(qm, mb_cmd, fun_num); in qm_vf_get_qos()
3659 static int qm_vf_read_qos(struct hisi_qm *qm) in qm_vf_read_qos() argument
3665 qm->mb_qos = 0; in qm_vf_read_qos()
3668 ret = qm_ping_pf(qm, QM_VF_GET_QOS); in qm_vf_read_qos()
3670 pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n"); in qm_vf_read_qos()
3676 if (qm->mb_qos) in qm_vf_read_qos()
3680 pci_err(qm->pdev, "PF ping VF timeout!\n"); in qm_vf_read_qos()
3691 struct hisi_qm *qm = filp->private_data; in qm_algqos_read() local
3696 ret = hisi_qm_get_dfx_access(qm); in qm_algqos_read()
3701 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { in qm_algqos_read()
3702 pci_err(qm->pdev, "dev resetting, read alg qos failed!\n"); in qm_algqos_read()
3707 if (qm->fun_type == QM_HW_PF) { in qm_algqos_read()
3708 ir = qm_get_shaper_vft_qos(qm, 0); in qm_algqos_read()
3710 ret = qm_vf_read_qos(qm); in qm_algqos_read()
3713 ir = qm->mb_qos; in qm_algqos_read()
3722 clear_bit(QM_RESETTING, &qm->misc_ctl); in qm_algqos_read()
3724 hisi_qm_put_dfx_access(qm); in qm_algqos_read()
3728 static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf, in qm_get_qos_value() argument
3732 const struct bus_type *bus_type = qm->pdev->dev.bus; in qm_get_qos_value()
3745 pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n"); in qm_get_qos_value()
3751 pci_err(qm->pdev, "input pci bdf number is error!\n"); in qm_get_qos_value()
3765 struct hisi_qm *qm = filp->private_data; in qm_algqos_write() local
3782 ret = qm_get_qos_value(qm, tbuf, &val, &fun_index); in qm_algqos_write()
3787 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { in qm_algqos_write()
3788 pci_err(qm->pdev, "dev resetting, write alg qos failed!\n"); in qm_algqos_write()
3792 ret = qm_pm_get_sync(qm); in qm_algqos_write()
3798 ret = qm_func_shaper_enable(qm, fun_index, val); in qm_algqos_write()
3800 pci_err(qm->pdev, "failed to enable function shaper!\n"); in qm_algqos_write()
3805 pci_info(qm->pdev, "the qos value of function%u is set to %lu.\n", in qm_algqos_write()
3810 qm_pm_put_sync(qm); in qm_algqos_write()
3812 clear_bit(QM_RESETTING, &qm->misc_ctl); in qm_algqos_write()
3825 * @qm: The qm for which we want to add debugfs files.
3829 void hisi_qm_set_algqos_init(struct hisi_qm *qm) in hisi_qm_set_algqos_init() argument
3831 if (qm->fun_type == QM_HW_PF) in hisi_qm_set_algqos_init()
3832 debugfs_create_file("alg_qos", 0644, qm->debug.debug_root, in hisi_qm_set_algqos_init()
3833 qm, &qm_algqos_fops); in hisi_qm_set_algqos_init()
3834 else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) in hisi_qm_set_algqos_init()
3835 debugfs_create_file("alg_qos", 0444, qm->debug.debug_root, in hisi_qm_set_algqos_init()
3836 qm, &qm_algqos_fops); in hisi_qm_set_algqos_init()
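
qm_algqos_write() parses a PCI BDF plus a rate value in 1~1000 (see the error string in qm_get_qos_value() above) from the PF's alg_qos debugfs file. A userspace sketch; the debugfs path and BDF below are hypothetical examples:

/* Set a VF's qos to 500 through the PF's alg_qos node. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/hisi_zip/0000:81:00.0/alg_qos",
		      O_WRONLY);

	if (fd < 0)
		return 1;
	dprintf(fd, "0000:81:00.1 500");
	close(fd);
	return 0;
}
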
3839 static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func) in hisi_qm_init_vf_qos() argument
3844 qm->factor[i].func_qos = QM_QOS_MAX_VAL; in hisi_qm_init_vf_qos()
3858 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_sriov_enable() local
3861 ret = qm_pm_get_sync(qm); in hisi_qm_sriov_enable()
3881 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) in hisi_qm_sriov_enable()
3882 hisi_qm_init_vf_qos(qm, num_vfs); in hisi_qm_sriov_enable()
3884 ret = qm_vf_q_assign(qm, num_vfs); in hisi_qm_sriov_enable()
3890 qm->vfs_num = num_vfs; in hisi_qm_sriov_enable()
3895 qm_clear_vft_config(qm); in hisi_qm_sriov_enable()
3904 qm_pm_put_sync(qm); in hisi_qm_sriov_enable()
3918 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_sriov_disable() local
3927 if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) { in hisi_qm_sriov_disable()
3934 ret = qm_clear_vft_config(qm); in hisi_qm_sriov_disable()
3938 qm_pm_put_sync(qm); in hisi_qm_sriov_disable()
3960 static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm) in qm_dev_err_handle() argument
3964 if (!qm->err_ini->get_dev_hw_err_status) { in qm_dev_err_handle()
3965 dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n"); in qm_dev_err_handle()
3970 err_sts = qm->err_ini->get_dev_hw_err_status(qm); in qm_dev_err_handle()
3972 if (err_sts & qm->err_info.ecc_2bits_mask) in qm_dev_err_handle()
3973 qm->err_status.is_dev_ecc_mbit = true; in qm_dev_err_handle()
3975 if (qm->err_ini->log_dev_hw_err) in qm_dev_err_handle()
3976 qm->err_ini->log_dev_hw_err(qm, err_sts); in qm_dev_err_handle()
3978 if (err_sts & qm->err_info.dev_reset_mask) in qm_dev_err_handle()
3981 if (qm->err_ini->clear_dev_hw_err_status) in qm_dev_err_handle()
3982 qm->err_ini->clear_dev_hw_err_status(qm, err_sts); in qm_dev_err_handle()
3988 static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm) in qm_process_dev_error() argument
3992 /* log qm error */ in qm_process_dev_error()
3993 qm_ret = qm_hw_error_handle(qm); in qm_process_dev_error()
3996 dev_ret = qm_dev_err_handle(qm); in qm_process_dev_error()
4004 * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
4009 * qm hardware error status when an error occurs.
4014 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_dev_err_detected() local
4024 ret = qm_process_dev_error(qm); in hisi_qm_dev_err_detected()
4032 static int qm_check_req_recv(struct hisi_qm *qm) in qm_check_req_recv() argument
4034 struct pci_dev *pdev = qm->pdev; in qm_check_req_recv()
4038 if (qm->ver >= QM_HW_V3) in qm_check_req_recv()
4041 writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID); in qm_check_req_recv()
4042 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, in qm_check_req_recv()
4046 dev_err(&pdev->dev, "Fails to read QM reg!\n"); in qm_check_req_recv()
4050 writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID); in qm_check_req_recv()
4051 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, in qm_check_req_recv()
4055 dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n"); in qm_check_req_recv()
4060 static int qm_set_pf_mse(struct hisi_qm *qm, bool set) in qm_set_pf_mse() argument
4062 struct pci_dev *pdev = qm->pdev; in qm_set_pf_mse()
4084 static int qm_set_vf_mse(struct hisi_qm *qm, bool set) in qm_set_vf_mse() argument
4086 struct pci_dev *pdev = qm->pdev; in qm_set_vf_mse()
4111 static int qm_vf_reset_prepare(struct hisi_qm *qm, in qm_vf_reset_prepare() argument
4114 struct hisi_qm_list *qm_list = qm->qm_list; in qm_vf_reset_prepare()
4115 struct pci_dev *pdev = qm->pdev; in qm_vf_reset_prepare()
4141 static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd, in qm_try_stop_vfs() argument
4144 struct pci_dev *pdev = qm->pdev; in qm_try_stop_vfs()
4147 if (!qm->vfs_num) in qm_try_stop_vfs()
4151 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { in qm_try_stop_vfs()
4152 ret = qm_ping_all_vfs(qm, cmd); in qm_try_stop_vfs()
4156 ret = qm_vf_reset_prepare(qm, stop_reason); in qm_try_stop_vfs()
4164 static int qm_controller_reset_prepare(struct hisi_qm *qm) in qm_controller_reset_prepare() argument
4166 struct pci_dev *pdev = qm->pdev; in qm_controller_reset_prepare()
4169 ret = qm_reset_prepare_ready(qm); in qm_controller_reset_prepare()
4176 qm_cmd_uninit(qm); in qm_controller_reset_prepare()
4179 ret = qm_try_stop_vfs(qm, QM_PF_SRST_PREPARE, QM_SOFT_RESET); in qm_controller_reset_prepare()
4183 ret = hisi_qm_stop(qm, QM_SOFT_RESET); in qm_controller_reset_prepare()
4185 		pci_err(pdev, "Failed to stop QM!\n"); in qm_controller_reset_prepare()
4186 qm_reset_bit_clear(qm); in qm_controller_reset_prepare()
4190 if (qm->use_sva) { in qm_controller_reset_prepare()
4191 ret = qm_hw_err_isolate(qm); in qm_controller_reset_prepare()
4196 ret = qm_wait_vf_prepare_finish(qm); in qm_controller_reset_prepare()
4200 clear_bit(QM_RST_SCHED, &qm->misc_ctl); in qm_controller_reset_prepare()
4205 static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) in qm_dev_ecc_mbit_handle() argument
4210 if (qm->ver >= QM_HW_V3) in qm_dev_ecc_mbit_handle()
4213 if (!qm->err_status.is_dev_ecc_mbit && in qm_dev_ecc_mbit_handle()
4214 qm->err_status.is_qm_ecc_mbit && in qm_dev_ecc_mbit_handle()
4215 qm->err_ini->close_axi_master_ooo) { in qm_dev_ecc_mbit_handle()
4216 qm->err_ini->close_axi_master_ooo(qm); in qm_dev_ecc_mbit_handle()
4217 } else if (qm->err_status.is_dev_ecc_mbit && in qm_dev_ecc_mbit_handle()
4218 !qm->err_status.is_qm_ecc_mbit && in qm_dev_ecc_mbit_handle()
4219 !qm->err_ini->close_axi_master_ooo) { in qm_dev_ecc_mbit_handle()
4220 nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); in qm_dev_ecc_mbit_handle()
4222 qm->io_base + QM_RAS_NFE_ENABLE); in qm_dev_ecc_mbit_handle()
4223 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); in qm_dev_ecc_mbit_handle()
4227 static int qm_soft_reset(struct hisi_qm *qm) in qm_soft_reset() argument
4229 struct pci_dev *pdev = qm->pdev; in qm_soft_reset()
4233 /* Ensure all doorbells and mailboxes received by QM */ in qm_soft_reset()
4234 ret = qm_check_req_recv(qm); in qm_soft_reset()
4238 if (qm->vfs_num) { in qm_soft_reset()
4239 ret = qm_set_vf_mse(qm, false); in qm_soft_reset()
4246 ret = qm->ops->set_msi(qm, false); in qm_soft_reset()
4252 qm_dev_ecc_mbit_handle(qm); in qm_soft_reset()
4256 qm->io_base + ACC_MASTER_GLOBAL_CTRL); in qm_soft_reset()
4259 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, in qm_soft_reset()
4268 if (qm->err_ini->close_sva_prefetch) in qm_soft_reset()
4269 qm->err_ini->close_sva_prefetch(qm); in qm_soft_reset()
4271 ret = qm_set_pf_mse(qm, false); in qm_soft_reset()
4283 qm->err_info.acpi_rst, in qm_soft_reset()
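
Once MSI is disabled and the AXI master has been quiesced via ACC_MASTER_GLOBAL_CTRL/ACC_MASTER_TRANS_RETURN, qm_soft_reset() hands the actual reset to firmware by evaluating the ACPI method named in qm->err_info.acpi_rst. A hedged sketch of that call:

	#include <linux/acpi.h>
	#include <linux/pci.h>

	static int trigger_platform_reset(struct pci_dev *pdev, char *method)
	{
		unsigned long long value = 0;
		acpi_status s;

		/* @method carries the platform method name, e.g. qm->err_info.acpi_rst */
		s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), method, NULL, &value);
		if (ACPI_FAILURE(s))
			return -EIO;
		if (value)	/* non-zero result means the reset method failed */
			return -EIO;

		return 0;
	}
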
4302 static int qm_vf_reset_done(struct hisi_qm *qm) in qm_vf_reset_done() argument
4304 struct hisi_qm_list *qm_list = qm->qm_list; in qm_vf_reset_done()
4305 struct pci_dev *pdev = qm->pdev; in qm_vf_reset_done()
4331 static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd) in qm_try_start_vfs() argument
4333 struct pci_dev *pdev = qm->pdev; in qm_try_start_vfs()
4336 if (!qm->vfs_num) in qm_try_start_vfs()
4339 ret = qm_vf_q_assign(qm, qm->vfs_num); in qm_try_start_vfs()
4346 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { in qm_try_start_vfs()
4347 ret = qm_ping_all_vfs(qm, cmd); in qm_try_start_vfs()
4351 ret = qm_vf_reset_done(qm); in qm_try_start_vfs()
4359 static int qm_dev_hw_init(struct hisi_qm *qm) in qm_dev_hw_init() argument
4361 return qm->err_ini->hw_init(qm); in qm_dev_hw_init()
4364 static void qm_restart_prepare(struct hisi_qm *qm) in qm_restart_prepare() argument
4368 if (qm->err_ini->open_sva_prefetch) in qm_restart_prepare()
4369 qm->err_ini->open_sva_prefetch(qm); in qm_restart_prepare()
4371 if (qm->ver >= QM_HW_V3) in qm_restart_prepare()
4374 if (!qm->err_status.is_qm_ecc_mbit && in qm_restart_prepare()
4375 !qm->err_status.is_dev_ecc_mbit) in qm_restart_prepare()
4379 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_prepare()
4380 writel(value & ~qm->err_info.msi_wr_port, in qm_restart_prepare()
4381 qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_prepare()
4384 value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask; in qm_restart_prepare()
4385 if (value && qm->err_ini->clear_dev_hw_err_status) in qm_restart_prepare()
4386 qm->err_ini->clear_dev_hw_err_status(qm, value); in qm_restart_prepare()
4388 /* clear QM ecc mbit error source */ in qm_restart_prepare()
4389 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE); in qm_restart_prepare()
4392 writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS); in qm_restart_prepare()
4395 static void qm_restart_done(struct hisi_qm *qm) in qm_restart_done() argument
4399 if (qm->ver >= QM_HW_V3) in qm_restart_done()
4402 if (!qm->err_status.is_qm_ecc_mbit && in qm_restart_done()
4403 !qm->err_status.is_dev_ecc_mbit) in qm_restart_done()
4407 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_done()
4408 value |= qm->err_info.msi_wr_port; in qm_restart_done()
4409 writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_done()
4412 qm->err_status.is_qm_ecc_mbit = false; in qm_restart_done()
4413 qm->err_status.is_dev_ecc_mbit = false; in qm_restart_done()
4416 static int qm_controller_reset_done(struct hisi_qm *qm) in qm_controller_reset_done() argument
4418 struct pci_dev *pdev = qm->pdev; in qm_controller_reset_done()
4421 ret = qm->ops->set_msi(qm, true); in qm_controller_reset_done()
4427 ret = qm_set_pf_mse(qm, true); in qm_controller_reset_done()
4433 if (qm->vfs_num) { in qm_controller_reset_done()
4434 ret = qm_set_vf_mse(qm, true); in qm_controller_reset_done()
4441 ret = qm_dev_hw_init(qm); in qm_controller_reset_done()
4447 qm_restart_prepare(qm); in qm_controller_reset_done()
4448 hisi_qm_dev_err_init(qm); in qm_controller_reset_done()
4449 if (qm->err_ini->open_axi_master_ooo) in qm_controller_reset_done()
4450 qm->err_ini->open_axi_master_ooo(qm); in qm_controller_reset_done()
4452 ret = qm_dev_mem_reset(qm); in qm_controller_reset_done()
4458 ret = qm_restart(qm); in qm_controller_reset_done()
4460 pci_err(pdev, "Failed to start QM!\n"); in qm_controller_reset_done()
4464 ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE); in qm_controller_reset_done()
4468 ret = qm_wait_vf_prepare_finish(qm); in qm_controller_reset_done()
4472 qm_cmd_init(qm); in qm_controller_reset_done()
4473 qm_restart_done(qm); in qm_controller_reset_done()
4475 qm_reset_bit_clear(qm); in qm_controller_reset_done()
4480 static int qm_controller_reset(struct hisi_qm *qm) in qm_controller_reset() argument
4482 struct pci_dev *pdev = qm->pdev; in qm_controller_reset()
4487 ret = qm_controller_reset_prepare(qm); in qm_controller_reset()
4489 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); in qm_controller_reset()
4490 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); in qm_controller_reset()
4491 clear_bit(QM_RST_SCHED, &qm->misc_ctl); in qm_controller_reset()
4495 hisi_qm_show_last_dfx_regs(qm); in qm_controller_reset()
4496 if (qm->err_ini->show_last_dfx_regs) in qm_controller_reset()
4497 qm->err_ini->show_last_dfx_regs(qm); in qm_controller_reset()
4499 ret = qm_soft_reset(qm); in qm_controller_reset()
4503 ret = qm_controller_reset_done(qm); in qm_controller_reset()
4513 qm_reset_bit_clear(qm); in qm_controller_reset()
4516 if (qm->use_sva) in qm_controller_reset()
4517 qm->isolate_data.is_isolate = true; in qm_controller_reset()
4525  * This function offers the QM-related PCIe device reset interface. Drivers which
4526  * use QM can use this function as slot_reset in their struct pci_error_handlers.
4530 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_dev_slot_reset() local
4537 ret = qm_controller_reset(qm); in hisi_qm_dev_slot_reset()
4550 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_reset_prepare() local
4566 ret = qm_reset_prepare_ready(qm); in hisi_qm_reset_prepare()
4573 if (qm->fun_type == QM_HW_PF) in hisi_qm_reset_prepare()
4574 qm_cmd_uninit(qm); in hisi_qm_reset_prepare()
4576 ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_DOWN); in hisi_qm_reset_prepare()
4580 ret = hisi_qm_stop(qm, QM_DOWN); in hisi_qm_reset_prepare()
4582 pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret); in hisi_qm_reset_prepare()
4583 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); in hisi_qm_reset_prepare()
4584 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); in hisi_qm_reset_prepare()
4588 ret = qm_wait_vf_prepare_finish(qm); in hisi_qm_reset_prepare()
4599 struct hisi_qm *qm = pci_get_drvdata(pf_pdev); in qm_flr_reset_complete() local
4602 pci_read_config_dword(qm->pdev, PCI_COMMAND, &id); in qm_flr_reset_complete()
4614 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_reset_done() local
4617 if (qm->fun_type == QM_HW_PF) { in hisi_qm_reset_done()
4618 ret = qm_dev_hw_init(qm); in hisi_qm_reset_done()
4627 ret = qm_restart(qm); in hisi_qm_reset_done()
4629 pci_err(pdev, "Failed to start QM, ret = %d.\n", ret); in hisi_qm_reset_done()
4633 ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE); in hisi_qm_reset_done()
4637 ret = qm_wait_vf_prepare_finish(qm); in hisi_qm_reset_done()
4642 if (qm->fun_type == QM_HW_PF) in hisi_qm_reset_done()
4643 qm_cmd_init(qm); in hisi_qm_reset_done()
4648 qm_reset_bit_clear(qm); in hisi_qm_reset_done()
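
hisi_qm_dev_err_detected(), hisi_qm_dev_slot_reset(), hisi_qm_reset_prepare() and hisi_qm_reset_done() are designed to drop straight into a driver's struct pci_error_handlers, and hisi_qm_dev_shutdown() (below) into its .shutdown hook. A sketch of that wiring for a hypothetical hisi_foo driver:

	static const struct pci_error_handlers hisi_foo_err_handler = {
		.error_detected	= hisi_qm_dev_err_detected,
		.slot_reset	= hisi_qm_dev_slot_reset,
		.reset_prepare	= hisi_qm_reset_prepare,
		.reset_done	= hisi_qm_reset_done,
	};

	static struct pci_driver hisi_foo_pci_driver = {
		.name		= "hisi_foo",
		.err_handler	= &hisi_foo_err_handler,
		.shutdown	= hisi_qm_dev_shutdown,
		/* .id_table, .probe and .remove omitted in this sketch */
	};
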
4654 struct hisi_qm *qm = data; in qm_abnormal_irq() local
4657 atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt); in qm_abnormal_irq()
4658 ret = qm_process_dev_error(qm); in qm_abnormal_irq()
4660 !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) && in qm_abnormal_irq()
4661 !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl)) in qm_abnormal_irq()
4662 schedule_work(&qm->rst_work); in qm_abnormal_irq()
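
qm_abnormal_irq() schedules the controller-reset work at most once: QM_RST_SCHED is claimed with test_and_set_bit() before schedule_work(), and the reset path clears the bit when it finishes or gives up. The idiom in isolation:

	#include <linux/bitops.h>
	#include <linux/workqueue.h>

	/*
	 * Schedule @work only if @bit in @flags was not already claimed;
	 * whoever runs @work is responsible for clearing @bit afterwards.
	 */
	static void schedule_work_once(unsigned long *flags, int bit,
				       struct work_struct *work)
	{
		if (!test_and_set_bit(bit, flags))
			schedule_work(work);
	}
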
4671  * This function stops the qm when the OS shuts down or reboots.
4675 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_dev_shutdown() local
4678 ret = hisi_qm_stop(qm, QM_DOWN); in hisi_qm_dev_shutdown()
4680 		dev_err(&pdev->dev, "Failed to stop qm in shutdown!\n"); in hisi_qm_dev_shutdown()
4682 hisi_qm_cache_wb(qm); in hisi_qm_dev_shutdown()
4688 struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work); in hisi_qm_controller_reset() local
4691 ret = qm_pm_get_sync(qm); in hisi_qm_controller_reset()
4693 clear_bit(QM_RST_SCHED, &qm->misc_ctl); in hisi_qm_controller_reset()
4698 ret = qm_controller_reset(qm); in hisi_qm_controller_reset()
4700 dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret); in hisi_qm_controller_reset()
4702 qm_pm_put_sync(qm); in hisi_qm_controller_reset()
4705 static void qm_pf_reset_vf_prepare(struct hisi_qm *qm, in qm_pf_reset_vf_prepare() argument
4709 struct pci_dev *pdev = qm->pdev; in qm_pf_reset_vf_prepare()
4712 ret = qm_reset_prepare_ready(qm); in qm_pf_reset_vf_prepare()
4715 atomic_set(&qm->status.flags, QM_STOP); in qm_pf_reset_vf_prepare()
4720 ret = hisi_qm_stop(qm, stop_reason); in qm_pf_reset_vf_prepare()
4722 dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret); in qm_pf_reset_vf_prepare()
4723 atomic_set(&qm->status.flags, QM_STOP); in qm_pf_reset_vf_prepare()
4731 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); in qm_pf_reset_vf_prepare()
4732 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); in qm_pf_reset_vf_prepare()
4735 ret = qm_ping_pf(qm, cmd); in qm_pf_reset_vf_prepare()
4740 static void qm_pf_reset_vf_done(struct hisi_qm *qm) in qm_pf_reset_vf_done() argument
4743 struct pci_dev *pdev = qm->pdev; in qm_pf_reset_vf_done()
4747 ret = hisi_qm_start(qm); in qm_pf_reset_vf_done()
4749 dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret); in qm_pf_reset_vf_done()
4753 qm_cmd_init(qm); in qm_pf_reset_vf_done()
4754 ret = qm_ping_pf(qm, cmd); in qm_pf_reset_vf_done()
4758 qm_reset_bit_clear(qm); in qm_pf_reset_vf_done()
4761 static int qm_wait_pf_reset_finish(struct hisi_qm *qm) in qm_wait_pf_reset_finish() argument
4763 struct device *dev = &qm->pdev->dev; in qm_wait_pf_reset_finish()
4769 ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val, in qm_wait_pf_reset_finish()
4782 ret = qm_get_mb_cmd(qm, &msg, 0); in qm_wait_pf_reset_finish()
4783 qm_clear_cmd_interrupt(qm, 0); in qm_wait_pf_reset_finish()
4798 static void qm_pf_reset_vf_process(struct hisi_qm *qm, in qm_pf_reset_vf_process() argument
4801 struct device *dev = &qm->pdev->dev; in qm_pf_reset_vf_process()
4807 qm_cmd_uninit(qm); in qm_pf_reset_vf_process()
4808 qm_pf_reset_vf_prepare(qm, stop_reason); in qm_pf_reset_vf_process()
4810 ret = qm_wait_pf_reset_finish(qm); in qm_pf_reset_vf_process()
4814 qm_pf_reset_vf_done(qm); in qm_pf_reset_vf_process()
4821 qm_cmd_init(qm); in qm_pf_reset_vf_process()
4822 qm_reset_bit_clear(qm); in qm_pf_reset_vf_process()
4825 static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num) in qm_handle_cmd_msg() argument
4827 struct device *dev = &qm->pdev->dev; in qm_handle_cmd_msg()
4836 ret = qm_get_mb_cmd(qm, &msg, fun_num); in qm_handle_cmd_msg()
4837 qm_clear_cmd_interrupt(qm, BIT(fun_num)); in qm_handle_cmd_msg()
4846 qm_pf_reset_vf_process(qm, QM_DOWN); in qm_handle_cmd_msg()
4849 qm_pf_reset_vf_process(qm, QM_SOFT_RESET); in qm_handle_cmd_msg()
4852 qm_vf_get_qos(qm, fun_num); in qm_handle_cmd_msg()
4855 qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT; in qm_handle_cmd_msg()
4865 struct hisi_qm *qm = container_of(cmd_process, in qm_cmd_process() local
4867 u32 vfs_num = qm->vfs_num; in qm_cmd_process()
4871 if (qm->fun_type == QM_HW_PF) { in qm_cmd_process()
4872 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P); in qm_cmd_process()
4878 qm_handle_cmd_msg(qm, i); in qm_cmd_process()
4884 qm_handle_cmd_msg(qm, 0); in qm_cmd_process()
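
On the PF, qm_cmd_process() reads the 64-bit per-function interrupt source register and calls qm_handle_cmd_msg() once per requesting function; a VF only ever handles messages from function 0, the PF. A reduced sketch of the PF-side scan, assuming the bit layout implied above (bit N set means function N has a command pending):

	#include <linux/bitops.h>

	static void dispatch_vf_cmds(struct hisi_qm *qm, u64 int_source, u32 vfs_num)
	{
		unsigned long src = int_source;
		unsigned int fun;

		/* functions 1..vfs_num are the VFs; skip the PF's own bit */
		for_each_set_bit(fun, &src, vfs_num + 1) {
			if (fun == 0)
				continue;
			qm_handle_cmd_msg(qm, fun);
		}
	}
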
4888  * hisi_qm_alg_register() - Register algs with crypto and add the qm to qm_list.
4889  * @qm: The qm to add.
4890  * @qm_list: The qm list.
4892  * This function adds the qm to the qm list, and registers the algorithms
4893  * with crypto when the qm list was empty.
4895 int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list) in hisi_qm_alg_register() argument
4897 struct device *dev = &qm->pdev->dev; in hisi_qm_alg_register()
4904 list_add_tail(&qm->list, &qm_list->list); in hisi_qm_alg_register()
4907 if (qm->ver <= QM_HW_V2 && qm->use_sva) { in hisi_qm_alg_register()
4913 ret = qm_list->register_to_crypto(qm); in hisi_qm_alg_register()
4916 list_del(&qm->list); in hisi_qm_alg_register()
4926  * hisi_qm_alg_unregister() - Unregister algs from crypto and delete the qm from
4927  * the qm list.
4928  * @qm: The qm to delete.
4929  * @qm_list: The qm list.
4931  * This function deletes the qm from the qm list, and unregisters the algorithms
4932  * from crypto when the qm list becomes empty.
4934 void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list) in hisi_qm_alg_unregister() argument
4937 list_del(&qm->list); in hisi_qm_alg_unregister()
4940 if (qm->ver <= QM_HW_V2 && qm->use_sva) in hisi_qm_alg_unregister()
4944 qm_list->unregister_from_crypto(qm); in hisi_qm_alg_unregister()
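
Drivers typically call hisi_qm_alg_register() as the last step of a successful probe and hisi_qm_alg_unregister() first on remove, so the crypto algorithms are only visible while a device can back them. A hedged sketch of the probe tail, with foo_devices standing in for the driver's global hisi_qm_list (initialized elsewhere, e.g. with hisi_qm_init_list()):

	static struct hisi_qm_list foo_devices;

	static int hisi_foo_probe_tail(struct hisi_qm *qm)
	{
		int ret;

		ret = hisi_qm_start(qm);
		if (ret)
			return ret;

		/* register last: once this succeeds, crypto requests may arrive */
		ret = hisi_qm_alg_register(qm, &foo_devices);
		if (ret)
			hisi_qm_stop(qm, QM_NORMAL);

		return ret;
	}
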
4948 static void qm_unregister_abnormal_irq(struct hisi_qm *qm) in qm_unregister_abnormal_irq() argument
4950 struct pci_dev *pdev = qm->pdev; in qm_unregister_abnormal_irq()
4953 if (qm->fun_type == QM_HW_VF) in qm_unregister_abnormal_irq()
4956 val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val; in qm_unregister_abnormal_irq()
4961 free_irq(pci_irq_vector(pdev, irq_vector), qm); in qm_unregister_abnormal_irq()
4964 static int qm_register_abnormal_irq(struct hisi_qm *qm) in qm_register_abnormal_irq() argument
4966 struct pci_dev *pdev = qm->pdev; in qm_register_abnormal_irq()
4970 if (qm->fun_type == QM_HW_VF) in qm_register_abnormal_irq()
4973 val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val; in qm_register_abnormal_irq()
4978 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm); in qm_register_abnormal_irq()
4980 		dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d\n", ret); in qm_register_abnormal_irq()
4985 static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm) in qm_unregister_mb_cmd_irq() argument
4987 struct pci_dev *pdev = qm->pdev; in qm_unregister_mb_cmd_irq()
4990 val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val; in qm_unregister_mb_cmd_irq()
4995 free_irq(pci_irq_vector(pdev, irq_vector), qm); in qm_unregister_mb_cmd_irq()
4998 static int qm_register_mb_cmd_irq(struct hisi_qm *qm) in qm_register_mb_cmd_irq() argument
5000 struct pci_dev *pdev = qm->pdev; in qm_register_mb_cmd_irq()
5004 val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val; in qm_register_mb_cmd_irq()
5009 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm); in qm_register_mb_cmd_irq()
5016 static void qm_unregister_aeq_irq(struct hisi_qm *qm) in qm_unregister_aeq_irq() argument
5018 struct pci_dev *pdev = qm->pdev; in qm_unregister_aeq_irq()
5021 val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val; in qm_unregister_aeq_irq()
5026 free_irq(pci_irq_vector(pdev, irq_vector), qm); in qm_unregister_aeq_irq()
5029 static int qm_register_aeq_irq(struct hisi_qm *qm) in qm_register_aeq_irq() argument
5031 struct pci_dev *pdev = qm->pdev; in qm_register_aeq_irq()
5035 val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val; in qm_register_aeq_irq()
5041 qm_aeq_thread, IRQF_ONESHOT, qm->dev_name, qm); in qm_register_aeq_irq()
5048 static void qm_unregister_eq_irq(struct hisi_qm *qm) in qm_unregister_eq_irq() argument
5050 struct pci_dev *pdev = qm->pdev; in qm_unregister_eq_irq()
5053 val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val; in qm_unregister_eq_irq()
5058 free_irq(pci_irq_vector(pdev, irq_vector), qm); in qm_unregister_eq_irq()
5061 static int qm_register_eq_irq(struct hisi_qm *qm) in qm_register_eq_irq() argument
5063 struct pci_dev *pdev = qm->pdev; in qm_register_eq_irq()
5067 val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val; in qm_register_eq_irq()
5072 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_eq_irq, 0, qm->dev_name, qm); in qm_register_eq_irq()
5079 static void qm_irqs_unregister(struct hisi_qm *qm) in qm_irqs_unregister() argument
5081 qm_unregister_mb_cmd_irq(qm); in qm_irqs_unregister()
5082 qm_unregister_abnormal_irq(qm); in qm_irqs_unregister()
5083 qm_unregister_aeq_irq(qm); in qm_irqs_unregister()
5084 qm_unregister_eq_irq(qm); in qm_irqs_unregister()
5087 static int qm_irqs_register(struct hisi_qm *qm) in qm_irqs_register() argument
5091 ret = qm_register_eq_irq(qm); in qm_irqs_register()
5095 ret = qm_register_aeq_irq(qm); in qm_irqs_register()
5099 ret = qm_register_abnormal_irq(qm); in qm_irqs_register()
5103 ret = qm_register_mb_cmd_irq(qm); in qm_irqs_register()
5110 qm_unregister_abnormal_irq(qm); in qm_irqs_register()
5112 qm_unregister_aeq_irq(qm); in qm_irqs_register()
5114 qm_unregister_eq_irq(qm); in qm_irqs_register()
5118 static int qm_get_qp_num(struct hisi_qm *qm) in qm_get_qp_num() argument
5120 struct device *dev = &qm->pdev->dev; in qm_get_qp_num()
5124 if (qm->fun_type == QM_HW_VF) { in qm_get_qp_num()
5125 if (qm->ver != QM_HW_V1) in qm_get_qp_num()
5127 return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); in qm_get_qp_num()
5132 is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); in qm_get_qp_num()
5133 qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true); in qm_get_qp_num()
5134 qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, in qm_get_qp_num()
5137 if (qm->qp_num <= qm->max_qp_num) in qm_get_qp_num()
5140 if (test_bit(QM_MODULE_PARAM, &qm->misc_ctl)) { in qm_get_qp_num()
5143 qm->qp_num, qm->max_qp_num); in qm_get_qp_num()
5148 qm->qp_num, qm->max_qp_num); in qm_get_qp_num()
5149 qm->qp_num = qm->max_qp_num; in qm_get_qp_num()
5150 qm->debug.curr_qm_qp_num = qm->qp_num; in qm_get_qp_num()
5155 static int qm_pre_store_irq_type_caps(struct hisi_qm *qm) in qm_pre_store_irq_type_caps() argument
5158 struct pci_dev *pdev = qm->pdev; in qm_pre_store_irq_type_caps()
5168 qm_cap[i].cap_val = hisi_qm_get_hw_info(qm, qm_basic_info, in qm_pre_store_irq_type_caps()
5169 qm_pre_store_caps[i], qm->cap_ver); in qm_pre_store_irq_type_caps()
5172 qm->cap_tables.qm_cap_table = qm_cap; in qm_pre_store_irq_type_caps()
5177 static int qm_get_hw_caps(struct hisi_qm *qm) in qm_get_hw_caps() argument
5179 const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ? in qm_get_hw_caps()
5181 u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) : in qm_get_hw_caps()
5186 val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true); in qm_get_hw_caps()
5188 set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); in qm_get_hw_caps()
5190 if (qm->ver >= QM_HW_V3) { in qm_get_hw_caps()
5191 val = readl(qm->io_base + QM_FUNC_CAPS_REG); in qm_get_hw_caps()
5192 qm->cap_ver = val & QM_CAPBILITY_VERSION; in qm_get_hw_caps()
5197 val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver); in qm_get_hw_caps()
5199 set_bit(qm_cap_info_comm[i].type, &qm->caps); in qm_get_hw_caps()
5204 val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver); in qm_get_hw_caps()
5206 set_bit(cap_info[i].type, &qm->caps); in qm_get_hw_caps()
5210 return qm_pre_store_irq_type_caps(qm); in qm_get_hw_caps()
5213 static int qm_get_pci_res(struct hisi_qm *qm) in qm_get_pci_res() argument
5215 struct pci_dev *pdev = qm->pdev; in qm_get_pci_res()
5219 ret = pci_request_mem_regions(pdev, qm->dev_name); in qm_get_pci_res()
5225 qm->phys_base = pci_resource_start(pdev, PCI_BAR_2); in qm_get_pci_res()
5226 qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2)); in qm_get_pci_res()
5227 if (!qm->io_base) { in qm_get_pci_res()
5232 ret = qm_get_hw_caps(qm); in qm_get_pci_res()
5236 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { in qm_get_pci_res()
5237 qm->db_interval = QM_QP_DB_INTERVAL; in qm_get_pci_res()
5238 qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4); in qm_get_pci_res()
5239 qm->db_io_base = ioremap(qm->db_phys_base, in qm_get_pci_res()
5241 if (!qm->db_io_base) { in qm_get_pci_res()
5246 qm->db_phys_base = qm->phys_base; in qm_get_pci_res()
5247 qm->db_io_base = qm->io_base; in qm_get_pci_res()
5248 qm->db_interval = 0; in qm_get_pci_res()
5251 ret = qm_get_qp_num(qm); in qm_get_pci_res()
5258 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) in qm_get_pci_res()
5259 iounmap(qm->db_io_base); in qm_get_pci_res()
5261 iounmap(qm->io_base); in qm_get_pci_res()
5267 static int hisi_qm_pci_init(struct hisi_qm *qm) in hisi_qm_pci_init() argument
5269 struct pci_dev *pdev = qm->pdev; in hisi_qm_pci_init()
5280 ret = qm_get_pci_res(qm); in hisi_qm_pci_init()
5289 num_vec = qm_get_irq_num(qm); in hisi_qm_pci_init()
5299 qm_put_pci_res(qm); in hisi_qm_pci_init()
5305 static int hisi_qm_init_work(struct hisi_qm *qm) in hisi_qm_init_work() argument
5309 for (i = 0; i < qm->qp_num; i++) in hisi_qm_init_work()
5310 INIT_WORK(&qm->poll_data[i].work, qm_work_process); in hisi_qm_init_work()
5312 if (qm->fun_type == QM_HW_PF) in hisi_qm_init_work()
5313 INIT_WORK(&qm->rst_work, hisi_qm_controller_reset); in hisi_qm_init_work()
5315 if (qm->ver > QM_HW_V2) in hisi_qm_init_work()
5316 INIT_WORK(&qm->cmd_process, qm_cmd_process); in hisi_qm_init_work()
5318 qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | in hisi_qm_init_work()
5320 pci_name(qm->pdev)); in hisi_qm_init_work()
5321 if (!qm->wq) { in hisi_qm_init_work()
5322 pci_err(qm->pdev, "failed to alloc workqueue!\n"); in hisi_qm_init_work()
5329 static int hisi_qp_alloc_memory(struct hisi_qm *qm) in hisi_qp_alloc_memory() argument
5331 struct device *dev = &qm->pdev->dev; in hisi_qp_alloc_memory()
5336 qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL); in hisi_qp_alloc_memory()
5337 if (!qm->qp_array) in hisi_qp_alloc_memory()
5340 qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL); in hisi_qp_alloc_memory()
5341 if (!qm->poll_data) { in hisi_qp_alloc_memory()
5342 kfree(qm->qp_array); in hisi_qp_alloc_memory()
5346 qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP); in hisi_qp_alloc_memory()
5349 qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth; in hisi_qp_alloc_memory()
5351 for (i = 0; i < qm->qp_num; i++) { in hisi_qp_alloc_memory()
5352 qm->poll_data[i].qm = qm; in hisi_qp_alloc_memory()
5353 ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth); in hisi_qp_alloc_memory()
5362 hisi_qp_memory_uninit(qm, i); in hisi_qp_alloc_memory()
5367 static int hisi_qm_memory_init(struct hisi_qm *qm) in hisi_qm_memory_init() argument
5369 struct device *dev = &qm->pdev->dev; in hisi_qm_memory_init()
5373 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { in hisi_qm_memory_init()
5374 total_func = pci_sriov_get_totalvfs(qm->pdev) + 1; in hisi_qm_memory_init()
5375 qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL); in hisi_qm_memory_init()
5376 if (!qm->factor) in hisi_qm_memory_init()
5380 qm->factor[0].func_qos = QM_QOS_MAX_VAL; in hisi_qm_memory_init()
5383 #define QM_INIT_BUF(qm, type, num) do { \ in hisi_qm_memory_init() argument
5384 (qm)->type = ((qm)->qdma.va + (off)); \ in hisi_qm_memory_init()
5385 (qm)->type##_dma = (qm)->qdma.dma + (off); \ in hisi_qm_memory_init()
5389 idr_init(&qm->qp_idr); in hisi_qm_memory_init()
5390 qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP); in hisi_qm_memory_init()
5391 qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) + in hisi_qm_memory_init()
5392 QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) + in hisi_qm_memory_init()
5393 QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + in hisi_qm_memory_init()
5394 QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); in hisi_qm_memory_init()
5395 qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma, in hisi_qm_memory_init()
5397 	dev_dbg(dev, "allocate qm dma buf size=%zx\n", qm->qdma.size); in hisi_qm_memory_init()
5398 if (!qm->qdma.va) { in hisi_qm_memory_init()
5403 QM_INIT_BUF(qm, eqe, qm->eq_depth); in hisi_qm_memory_init()
5404 QM_INIT_BUF(qm, aeqe, qm->aeq_depth); in hisi_qm_memory_init()
5405 QM_INIT_BUF(qm, sqc, qm->qp_num); in hisi_qm_memory_init()
5406 QM_INIT_BUF(qm, cqc, qm->qp_num); in hisi_qm_memory_init()
5408 ret = hisi_qp_alloc_memory(qm); in hisi_qm_memory_init()
5415 dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); in hisi_qm_memory_init()
5417 idr_destroy(&qm->qp_idr); in hisi_qm_memory_init()
5418 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) in hisi_qm_memory_init()
5419 kfree(qm->factor); in hisi_qm_memory_init()
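
hisi_qm_memory_init() performs one dma_alloc_coherent() call, and the QM_INIT_BUF() macro then carves the eqe, aeqe, sqc and cqc tables out of that region at increasing offsets, so all four tables share a single contiguous DMA allocation. The carve pattern in isolation (the real code also rounds each size with QMC_ALIGN()):

	#include <linux/dma-mapping.h>

	struct sub_buf {
		void		*va;
		dma_addr_t	dma;
	};

	/* hand out the next @size bytes of a single coherent allocation */
	static void carve_sub_buf(void *base_va, dma_addr_t base_dma,
				  size_t *off, size_t size, struct sub_buf *out)
	{
		out->va	 = base_va + *off;
		out->dma = base_dma + *off;
		*off	+= size;
	}
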
5425  * hisi_qm_init() - Initialize the qm's configuration.
5426  * @qm: The qm to initialize.
5428  * This function initializes the qm; hisi_qm_start() can then be called to put the qm to work.
5430 int hisi_qm_init(struct hisi_qm *qm) in hisi_qm_init() argument
5432 struct pci_dev *pdev = qm->pdev; in hisi_qm_init()
5436 hisi_qm_pre_init(qm); in hisi_qm_init()
5438 ret = hisi_qm_pci_init(qm); in hisi_qm_init()
5442 ret = qm_irqs_register(qm); in hisi_qm_init()
5446 if (qm->fun_type == QM_HW_PF) { in hisi_qm_init()
5448 writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG); in hisi_qm_init()
5449 qm_disable_clock_gate(qm); in hisi_qm_init()
5450 ret = qm_dev_mem_reset(qm); in hisi_qm_init()
5457 if (qm->mode == UACCE_MODE_SVA) { in hisi_qm_init()
5458 ret = qm_alloc_uacce(qm); in hisi_qm_init()
5463 ret = hisi_qm_memory_init(qm); in hisi_qm_init()
5467 ret = hisi_qm_init_work(qm); in hisi_qm_init()
5471 qm_cmd_init(qm); in hisi_qm_init()
5472 atomic_set(&qm->status.flags, QM_INIT); in hisi_qm_init()
5477 hisi_qm_memory_uninit(qm); in hisi_qm_init()
5479 qm_remove_uacce(qm); in hisi_qm_init()
5481 qm_irqs_unregister(qm); in hisi_qm_init()
5483 hisi_qm_pci_uninit(qm); in hisi_qm_init()
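
Putting the pieces together, a driver's probe path built on this file reduces to "fill in struct hisi_qm, hisi_qm_init(), hisi_qm_start()", with hisi_qm_uninit() as the unwind. A hedged sketch; only the obvious hisi_foo fields are shown:

	static int hisi_foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		struct hisi_qm *qm;
		int ret;

		qm = devm_kzalloc(&pdev->dev, sizeof(*qm), GFP_KERNEL);
		if (!qm)
			return -ENOMEM;

		qm->pdev = pdev;
		/* qm->dev_name, qm->fun_type, qm->sqe_size, ... are set here */

		ret = hisi_qm_init(qm);
		if (ret)
			return ret;

		ret = hisi_qm_start(qm);
		if (ret)
			hisi_qm_uninit(qm);

		return ret;
	}
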
5490 * @qm: pointer to accelerator device.
5497 int hisi_qm_get_dfx_access(struct hisi_qm *qm) in hisi_qm_get_dfx_access() argument
5499 struct device *dev = &qm->pdev->dev; in hisi_qm_get_dfx_access()
5506 return qm_pm_get_sync(qm); in hisi_qm_get_dfx_access()
5512 * @qm: pointer to accelerator device.
5516 void hisi_qm_put_dfx_access(struct hisi_qm *qm) in hisi_qm_put_dfx_access() argument
5518 qm_pm_put_sync(qm); in hisi_qm_put_dfx_access()
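
The get/put pair is meant to bracket any debugfs (DFX) register access so the device stays resumed for the duration of the read. Typical shape, hedged:

	static int hisi_foo_dfx_show(struct hisi_qm *qm)
	{
		int ret;

		ret = hisi_qm_get_dfx_access(qm);
		if (ret)
			return ret;

		/* ... read and report debug registers while the device is awake ... */

		hisi_qm_put_dfx_access(qm);
		return 0;
	}
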
5523 * hisi_qm_pm_init() - Initialize qm runtime PM.
5524 * @qm: pointer to accelerator device.
5526  * Function that initializes qm runtime PM.
5528 void hisi_qm_pm_init(struct hisi_qm *qm) in hisi_qm_pm_init() argument
5530 struct device *dev = &qm->pdev->dev; in hisi_qm_pm_init()
5532 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) in hisi_qm_pm_init()
5542 * hisi_qm_pm_uninit() - Uninitialize qm runtime PM.
5543 * @qm: pointer to accelerator device.
5545  * Function that uninitializes qm runtime PM.
5547 void hisi_qm_pm_uninit(struct hisi_qm *qm) in hisi_qm_pm_uninit() argument
5549 struct device *dev = &qm->pdev->dev; in hisi_qm_pm_uninit()
5551 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) in hisi_qm_pm_uninit()
5559 static int qm_prepare_for_suspend(struct hisi_qm *qm) in qm_prepare_for_suspend() argument
5561 struct pci_dev *pdev = qm->pdev; in qm_prepare_for_suspend()
5565 ret = qm->ops->set_msi(qm, false); in qm_prepare_for_suspend()
5573 qm->io_base + ACC_MASTER_GLOBAL_CTRL); in qm_prepare_for_suspend()
5575 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, in qm_prepare_for_suspend()
5584 ret = qm_set_pf_mse(qm, false); in qm_prepare_for_suspend()
5591 static int qm_rebuild_for_resume(struct hisi_qm *qm) in qm_rebuild_for_resume() argument
5593 struct pci_dev *pdev = qm->pdev; in qm_rebuild_for_resume()
5596 ret = qm_set_pf_mse(qm, true); in qm_rebuild_for_resume()
5602 ret = qm->ops->set_msi(qm, true); in qm_rebuild_for_resume()
5608 ret = qm_dev_hw_init(qm); in qm_rebuild_for_resume()
5614 qm_cmd_init(qm); in qm_rebuild_for_resume()
5615 hisi_qm_dev_err_init(qm); in qm_rebuild_for_resume()
5617 writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG); in qm_rebuild_for_resume()
5618 qm_disable_clock_gate(qm); in qm_rebuild_for_resume()
5619 ret = qm_dev_mem_reset(qm); in qm_rebuild_for_resume()
5635 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_suspend() local
5640 ret = hisi_qm_stop(qm, QM_NORMAL); in hisi_qm_suspend()
5642 pci_err(pdev, "failed to stop qm(%d)\n", ret); in hisi_qm_suspend()
5646 ret = qm_prepare_for_suspend(qm); in hisi_qm_suspend()
5663 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_resume() local
5668 ret = qm_rebuild_for_resume(qm); in hisi_qm_resume()
5674 ret = hisi_qm_start(qm); in hisi_qm_resume()
5676 if (qm_check_dev_error(qm)) { in hisi_qm_resume()
5677 pci_info(pdev, "failed to start qm due to device error, device will be reset!\n"); in hisi_qm_resume()
5681 pci_err(pdev, "failed to start qm(%d)!\n", ret); in hisi_qm_resume()
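
hisi_qm_suspend() and hisi_qm_resume() take a struct device and plug directly into runtime PM. A sketch of the wiring, again for a hypothetical hisi_foo driver:

	#include <linux/pm.h>

	static const struct dev_pm_ops hisi_foo_pm_ops = {
		SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
	};

	static struct pci_driver hisi_foo_pci_driver = {
		.name		= "hisi_foo",
		.driver.pm	= &hisi_foo_pm_ops,
		/* remaining fields omitted; see the err_handler sketch above */
	};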