Lines Matching refs:qm

318 	struct hisi_qm *qm;  member
324 int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
325 void (*qm_db)(struct hisi_qm *qm, u16 qn,
327 u32 (*get_irq_num)(struct hisi_qm *qm);
328 int (*debug_init)(struct hisi_qm *qm);
329 void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
330 void (*hw_error_uninit)(struct hisi_qm *qm);
331 enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
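The members above form the hardware-version ops table: hisi_qm_pre_init() (further down in this listing) binds qm->ops to qm_hw_ops_v1 or qm_hw_ops_v2 once, and callers such as qm_db() dispatch through it. A minimal user-space sketch of that pattern, with stand-in types and values, not the driver itself:

    #include <stdio.h>

    struct fake_qm;                      /* stand-in for struct hisi_qm */

    struct fake_hw_ops {
        unsigned int (*get_irq_num)(struct fake_qm *qm);
    };

    struct fake_qm {
        int ver;                         /* 1 = V1, 2 = V2 */
        const struct fake_hw_ops *ops;
    };

    static unsigned int get_irq_num_v1(struct fake_qm *qm) { (void)qm; return 1; }
    static unsigned int get_irq_num_v2(struct fake_qm *qm) { (void)qm; return 4; }

    static const struct fake_hw_ops ops_v1 = { .get_irq_num = get_irq_num_v1 };
    static const struct fake_hw_ops ops_v2 = { .get_irq_num = get_irq_num_v2 };

    int main(void)
    {
        struct fake_qm qm = { .ver = 2 };

        /* mirrors hisi_qm_pre_init(): bind the ops table once */
        qm.ops = (qm.ver == 1) ? &ops_v1 : &ops_v2;

        /* callers then dispatch through qm->ops, as qm_db() does */
        printf("irq vectors: %u\n", qm.ops->get_irq_num(&qm));
        return 0;
    }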
390 static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new) in qm_avail_state() argument
392 enum qm_state curr = atomic_read(&qm->status.flags); in qm_avail_state()
412 dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n", in qm_avail_state()
416 dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n", in qm_avail_state()
422 static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp, in qm_qp_avail_state() argument
425 enum qm_state qm_curr = atomic_read(&qm->status.flags); in qm_qp_avail_state()
458 dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n", in qm_qp_avail_state()
462 dev_warn(&qm->pdev->dev, in qm_qp_avail_state()
470 static int qm_wait_mb_ready(struct hisi_qm *qm) in qm_wait_mb_ready() argument
474 return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE, in qm_wait_mb_ready()
480 static void qm_mb_write(struct hisi_qm *qm, const void *src) in qm_mb_write() argument
482 void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE; in qm_mb_write()
501 static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, in qm_mb() argument
507 dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n", in qm_mb()
518 mutex_lock(&qm->mailbox_lock); in qm_mb()
520 if (unlikely(qm_wait_mb_ready(qm))) { in qm_mb()
522 dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n"); in qm_mb()
526 qm_mb_write(qm, &mailbox); in qm_mb()
528 if (unlikely(qm_wait_mb_ready(qm))) { in qm_mb()
530 dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n"); in qm_mb()
535 mutex_unlock(&qm->mailbox_lock); in qm_mb()
538 atomic64_inc(&qm->debug.dfx.mb_err_cnt); in qm_mb()
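The qm_mb() lines show the mailbox send sequence: take mailbox_lock, wait for the ready bit, write the command, wait for ready again, and bump dfx.mb_err_cnt on failure. A hedged user-space sketch of that sequence, with locking elided and the MMIO ready poll replaced by a stub:

    #include <stdbool.h>
    #include <stdio.h>

    /* stand-in: the driver polls QM_MB_CMD_SEND_BASE for the ready bit */
    static bool mb_ready(void)            { return true; }
    static void mb_write(const void *cmd) { (void)cmd; }

    static long mb_err_cnt;              /* dfx.mb_err_cnt stand-in */

    static int send_mailbox(const void *cmd)
    {
        /* the driver holds qm->mailbox_lock across this whole sequence */
        if (!mb_ready())
            goto err;                    /* "mailbox is busy to start" */
        mb_write(cmd);
        if (!mb_ready())
            goto err;                    /* "mailbox operation timeout" */
        return 0;
    err:
        mb_err_cnt++;                    /* counted like dfx.mb_err_cnt */
        return -1;                       /* -EBUSY / -ETIMEDOUT in the driver */
    }

    int main(void)
    {
        int mbox_cmd = 0;                /* placeholder payload */
        printf("mailbox: %d\n", send_mailbox(&mbox_cmd));
        return 0;
    }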
542 static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) in qm_db_v1() argument
550 writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1); in qm_db_v1()
553 static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) in qm_db_v2() argument
569 writeq(doorbell, qm->io_base + dbase); in qm_db_v2()
572 static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) in qm_db() argument
574 dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n", in qm_db()
577 qm->ops->qm_db(qm, qn, cmd, index, priority); in qm_db()
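qm_db_v1()/qm_db_v2() build one 64-bit doorbell value and writeq() it; qm_db() picks the version through the ops table. The field positions below are illustrative assumptions only, since this listing does not show the driver's QM_DB_* shift macros:

    #include <stdint.h>
    #include <stdio.h>

    /* assumed bit layout, for illustration; the real shifts are QM_DB_* */
    static uint64_t make_doorbell(uint16_t qn, uint8_t cmd, uint16_t index,
                                  uint8_t priority)
    {
        return (uint64_t)qn |
               ((uint64_t)cmd << 16) |
               ((uint64_t)index << 32) |
               ((uint64_t)priority << 48);
    }

    int main(void)
    {
        /* one writeq() of this value rings the queue, as in qm_db_v2() */
        printf("doorbell=%#llx\n",
               (unsigned long long)make_doorbell(3, 1, 42, 0));
        return 0;
    }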
580 static int qm_dev_mem_reset(struct hisi_qm *qm) in qm_dev_mem_reset() argument
584 writel(0x1, qm->io_base + QM_MEM_START_INIT); in qm_dev_mem_reset()
585 return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val, in qm_dev_mem_reset()
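qm_dev_mem_reset() is the kick-then-poll idiom used throughout this file: write a start bit, then readl_relaxed_poll_timeout() on a done register. A portable sketch of the polling half; the helper name, timings, and register are made up:

    #include <stdio.h>
    #include <time.h>

    static volatile unsigned int done_reg;   /* QM_MEM_INIT_DONE stand-in */

    static int poll_done(const volatile unsigned int *reg, long timeout_ms)
    {
        struct timespec ts = { 0, 1000000 }; /* ~1 ms per poll */

        while (timeout_ms-- > 0) {
            if (*reg & 1)
                return 0;                    /* done bit set */
            nanosleep(&ts, NULL);
        }
        return -1;                           /* -ETIMEDOUT in the kernel */
    }

    int main(void)
    {
        /* the kick (writel(0x1, ... QM_MEM_START_INIT)) would go here */
        done_reg = 1;                        /* pretend hardware finished */
        printf("reset %s\n", poll_done(&done_reg, 10) ? "timed out" : "ok");
        return 0;
    }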
589 static u32 qm_get_irq_num_v1(struct hisi_qm *qm) in qm_get_irq_num_v1() argument
594 static u32 qm_get_irq_num_v2(struct hisi_qm *qm) in qm_get_irq_num_v2() argument
596 if (qm->fun_type == QM_HW_PF) in qm_get_irq_num_v2()
602 static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe) in qm_to_hisi_qp() argument
606 return &qm->qp_array[cqn]; in qm_to_hisi_qp()
619 static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm) in qm_poll_qp() argument
631 qp->req_cb(qp, qp->sqe + qm->sqe_size * in qm_poll_qp()
635 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, in qm_poll_qp()
641 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, in qm_poll_qp()
648 struct hisi_qm *qm = container_of(work, struct hisi_qm, work); in qm_work_process() local
649 struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; in qm_work_process()
653 while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { in qm_work_process()
655 qp = qm_to_hisi_qp(qm, eqe); in qm_work_process()
656 qm_poll_qp(qp, qm); in qm_work_process()
658 if (qm->status.eq_head == QM_EQ_DEPTH - 1) { in qm_work_process()
659 qm->status.eqc_phase = !qm->status.eqc_phase; in qm_work_process()
660 eqe = qm->eqe; in qm_work_process()
661 qm->status.eq_head = 0; in qm_work_process()
664 qm->status.eq_head++; in qm_work_process()
669 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_work_process()
673 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_work_process()
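qm_work_process() drains the event queue by phase bit: entries are valid while QM_EQE_PHASE(eqe) matches status.eqc_phase; at the last slot the consumer flips its expected phase and wraps the head to 0, then acknowledges the head via the EQ doorbell. A self-contained sketch of that ring discipline, where the depth and phase-bit location are stand-ins:

    #include <stdbool.h>
    #include <stdio.h>

    #define EQ_DEPTH 8                       /* QM_EQ_DEPTH stand-in */

    struct eqe { unsigned int dw0; };        /* phase bit assumed in dw0 */

    static bool eqe_phase(const struct eqe *e) { return e->dw0 & 1; }

    /* consume every entry whose phase matches what we expect */
    static void drain_eq(struct eqe *ring, unsigned int *head, bool *phase)
    {
        while (eqe_phase(&ring[*head]) == *phase) {
            printf("handle eqe %u\n", *head);    /* qm_poll_qp() here */
            if (*head == EQ_DEPTH - 1) {
                *phase = !*phase;            /* wrapped: flip expected phase */
                *head = 0;
            } else {
                (*head)++;
            }
        }
        /* ring the EQ doorbell with *head to acknowledge, as qm_db() does */
    }

    int main(void)
    {
        struct eqe ring[EQ_DEPTH] = { {1}, {1}, {1} };  /* 3 valid entries */
        unsigned int head = 0;
        bool phase = true;

        drain_eq(ring, &head, &phase);
        return 0;
    }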
678 struct hisi_qm *qm = (struct hisi_qm *)data; in do_qm_irq() local
681 if (qm->wq) in do_qm_irq()
682 queue_work(qm->wq, &qm->work); in do_qm_irq()
684 schedule_work(&qm->work); in do_qm_irq()
691 struct hisi_qm *qm = data; in qm_irq() local
693 if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE)) in qm_irq()
696 atomic64_inc(&qm->debug.dfx.err_irq_cnt); in qm_irq()
697 dev_err(&qm->pdev->dev, "invalid int source\n"); in qm_irq()
698 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_irq()
705 struct hisi_qm *qm = data; in qm_aeq_irq() local
706 struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head; in qm_aeq_irq()
709 atomic64_inc(&qm->debug.dfx.aeq_irq_cnt); in qm_aeq_irq()
710 if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE)) in qm_aeq_irq()
713 while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) { in qm_aeq_irq()
716 dev_err(&qm->pdev->dev, "%s overflow\n", in qm_aeq_irq()
719 dev_err(&qm->pdev->dev, "unknown error type %d\n", in qm_aeq_irq()
722 if (qm->status.aeq_head == QM_Q_DEPTH - 1) { in qm_aeq_irq()
723 qm->status.aeqc_phase = !qm->status.aeqc_phase; in qm_aeq_irq()
724 aeqe = qm->aeqe; in qm_aeq_irq()
725 qm->status.aeq_head = 0; in qm_aeq_irq()
728 qm->status.aeq_head++; in qm_aeq_irq()
731 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); in qm_aeq_irq()
737 static void qm_irq_unregister(struct hisi_qm *qm) in qm_irq_unregister() argument
739 struct pci_dev *pdev = qm->pdev; in qm_irq_unregister()
741 free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm); in qm_irq_unregister()
743 if (qm->ver == QM_HW_V1) in qm_irq_unregister()
746 free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm); in qm_irq_unregister()
748 if (qm->fun_type == QM_HW_PF) in qm_irq_unregister()
750 QM_ABNORMAL_EVENT_IRQ_VECTOR), qm); in qm_irq_unregister()
763 static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base, in qm_vft_data_cfg() argument
771 if (qm->ver == QM_HW_V1) { in qm_vft_data_cfg()
784 if (qm->ver == QM_HW_V1) { in qm_vft_data_cfg()
796 writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L); in qm_vft_data_cfg()
797 writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H); in qm_vft_data_cfg()
800 static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type, in qm_set_vft_common() argument
806 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_set_vft_common()
811 writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR); in qm_set_vft_common()
812 writel(type, qm->io_base + QM_VFT_CFG_TYPE); in qm_set_vft_common()
813 writel(fun_num, qm->io_base + QM_VFT_CFG); in qm_set_vft_common()
815 qm_vft_data_cfg(qm, type, base, number); in qm_set_vft_common()
817 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); in qm_set_vft_common()
818 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); in qm_set_vft_common()
820 return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_set_vft_common()
825 static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base, in qm_set_sqc_cqc_vft() argument
831 ret = qm_set_vft_common(qm, i, fun_num, base, number); in qm_set_sqc_cqc_vft()
839 static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number) in qm_get_vft_v2() argument
844 ret = qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1); in qm_get_vft_v2()
848 sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | in qm_get_vft_v2()
849 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); in qm_get_vft_v2()
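qm_get_vft_v2() issues a mailbox read and reassembles the 64-bit VFT value from the two 32-bit data registers. The low/high combine is the usual idiom; the register values below are invented:

    #include <stdint.h>
    #include <stdio.h>

    /* stand-ins for the QM_MB_CMD_DATA_ADDR_L/H register reads */
    static uint32_t read_lo(void) { return 0x00400001u; }
    static uint32_t read_hi(void) { return 0u; }

    int main(void)
    {
        uint64_t sqc_vft = (uint64_t)read_lo() | ((uint64_t)read_hi() << 32);

        /* base/number are then masked out of sqc_vft in the driver */
        printf("sqc_vft=%#llx\n", (unsigned long long)sqc_vft);
        return 0;
    }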
866 struct hisi_qm *qm = file_to_qm(file); in current_q_read() local
868 return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT; in current_q_read()
873 struct hisi_qm *qm = file_to_qm(file); in current_q_write() local
876 if (val >= qm->debug.curr_qm_qp_num) in current_q_write()
880 (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK); in current_q_write()
881 writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); in current_q_write()
884 (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK); in current_q_write()
885 writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); in current_q_write()
892 struct hisi_qm *qm = file_to_qm(file); in clear_enable_read() local
894 return readl(qm->io_base + QM_DFX_CNT_CLR_CE); in clear_enable_read()
900 struct hisi_qm *qm = file_to_qm(file); in clear_enable_write() local
905 writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE); in clear_enable_write()
1034 struct hisi_qm *qm = s->private; in qm_regs_show() local
1038 if (qm->fun_type == QM_HW_PF) in qm_regs_show()
1044 val = readl(qm->io_base + regs->reg_offset); in qm_regs_show()
1066 static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size, in qm_ctx_alloc() argument
1069 struct device *dev = &qm->pdev->dev; in qm_ctx_alloc()
1086 static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size, in qm_ctx_free() argument
1089 struct device *dev = &qm->pdev->dev; in qm_ctx_free()
1095 static int dump_show(struct hisi_qm *qm, void *info, in dump_show() argument
1098 struct device *dev = &qm->pdev->dev; in dump_show()
1130 static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) in qm_dump_sqc_raw() argument
1132 return qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1); in qm_dump_sqc_raw()
1135 static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) in qm_dump_cqc_raw() argument
1137 return qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1); in qm_dump_cqc_raw()
1140 static int qm_sqc_dump(struct hisi_qm *qm, const char *s) in qm_sqc_dump() argument
1142 struct device *dev = &qm->pdev->dev; in qm_sqc_dump()
1152 if (ret || qp_id >= qm->qp_num) { in qm_sqc_dump()
1153 dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1); in qm_sqc_dump()
1157 sqc = qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma); in qm_sqc_dump()
1161 ret = qm_dump_sqc_raw(qm, sqc_dma, qp_id); in qm_sqc_dump()
1163 down_read(&qm->qps_lock); in qm_sqc_dump()
1164 if (qm->sqc) { in qm_sqc_dump()
1165 sqc_curr = qm->sqc + qp_id; in qm_sqc_dump()
1167 ret = dump_show(qm, sqc_curr, sizeof(*sqc), in qm_sqc_dump()
1172 up_read(&qm->qps_lock); in qm_sqc_dump()
1177 ret = dump_show(qm, sqc, sizeof(*sqc), "SQC"); in qm_sqc_dump()
1182 qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma); in qm_sqc_dump()
1186 static int qm_cqc_dump(struct hisi_qm *qm, const char *s) in qm_cqc_dump() argument
1188 struct device *dev = &qm->pdev->dev; in qm_cqc_dump()
1198 if (ret || qp_id >= qm->qp_num) { in qm_cqc_dump()
1199 dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1); in qm_cqc_dump()
1203 cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma); in qm_cqc_dump()
1207 ret = qm_dump_cqc_raw(qm, cqc_dma, qp_id); in qm_cqc_dump()
1209 down_read(&qm->qps_lock); in qm_cqc_dump()
1210 if (qm->cqc) { in qm_cqc_dump()
1211 cqc_curr = qm->cqc + qp_id; in qm_cqc_dump()
1213 ret = dump_show(qm, cqc_curr, sizeof(*cqc), in qm_cqc_dump()
1218 up_read(&qm->qps_lock); in qm_cqc_dump()
1223 ret = dump_show(qm, cqc, sizeof(*cqc), "CQC"); in qm_cqc_dump()
1228 qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma); in qm_cqc_dump()
1232 static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size, in qm_eqc_aeqc_dump() argument
1235 struct device *dev = &qm->pdev->dev; in qm_eqc_aeqc_dump()
1245 xeqc = qm_ctx_alloc(qm, size, &xeqc_dma); in qm_eqc_aeqc_dump()
1249 ret = qm_mb(qm, cmd, xeqc_dma, 0, 1); in qm_eqc_aeqc_dump()
1253 ret = dump_show(qm, xeqc, size, name); in qm_eqc_aeqc_dump()
1258 qm_ctx_free(qm, size, xeqc, &xeqc_dma); in qm_eqc_aeqc_dump()
1262 static int q_dump_param_parse(struct hisi_qm *qm, char *s, in q_dump_param_parse() argument
1265 struct device *dev = &qm->pdev->dev; in q_dump_param_parse()
1266 unsigned int qp_num = qm->qp_num; in q_dump_param_parse()
1302 static int qm_sq_dump(struct hisi_qm *qm, char *s) in qm_sq_dump() argument
1304 struct device *dev = &qm->pdev->dev; in qm_sq_dump()
1310 ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id); in qm_sq_dump()
1314 sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL); in qm_sq_dump()
1318 qp = &qm->qp_array[qp_id]; in qm_sq_dump()
1319 memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH); in qm_sq_dump()
1320 sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size); in qm_sq_dump()
1321 memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK, in qm_sq_dump()
1322 qm->debug.sqe_mask_len); in qm_sq_dump()
1324 ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE"); in qm_sq_dump()
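qm_sq_dump() copies the whole SQ out, then memset()s sqe_mask_len bytes at sqe_mask_offset so the debugfs dump does not expose DMA addresses. A tiny sketch of the masking step, with made-up sizes and offsets:

    #include <stdio.h>
    #include <string.h>

    #define SQE_SIZE  16                     /* qm->sqe_size stand-in */
    #define MASK_OFF   8                     /* debug.sqe_mask_offset stand-in */
    #define MASK_LEN   4                     /* debug.sqe_mask_len stand-in */
    #define ADDR_MASK 0xff                   /* QM_SQE_ADDR_MASK stand-in */

    int main(void)
    {
        unsigned char sqe[SQE_SIZE] = { 0 };

        /* blank the address bytes before showing the entry, as qm_sq_dump() does */
        memset(sqe + MASK_OFF, ADDR_MASK, MASK_LEN);

        for (int i = 0; i < SQE_SIZE; i++)
            printf("%02x%c", sqe[i], i == SQE_SIZE - 1 ? '\n' : ' ');
        return 0;
    }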
1333 static int qm_cq_dump(struct hisi_qm *qm, char *s) in qm_cq_dump() argument
1335 struct device *dev = &qm->pdev->dev; in qm_cq_dump()
1341 ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id); in qm_cq_dump()
1345 qp = &qm->qp_array[qp_id]; in qm_cq_dump()
1347 ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE"); in qm_cq_dump()
1354 static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s, in qm_eq_aeq_dump() argument
1357 struct device *dev = &qm->pdev->dev; in qm_eq_aeq_dump()
1377 down_read(&qm->qps_lock); in qm_eq_aeq_dump()
1379 if (qm->eqe && !strcmp(name, "EQE")) { in qm_eq_aeq_dump()
1380 xeqe = qm->eqe + xeqe_id; in qm_eq_aeq_dump()
1381 } else if (qm->aeqe && !strcmp(name, "AEQE")) { in qm_eq_aeq_dump()
1382 xeqe = qm->aeqe + xeqe_id; in qm_eq_aeq_dump()
1388 ret = dump_show(qm, xeqe, size, name); in qm_eq_aeq_dump()
1393 up_read(&qm->qps_lock); in qm_eq_aeq_dump()
1397 static int qm_dbg_help(struct hisi_qm *qm, char *s) in qm_dbg_help() argument
1399 struct device *dev = &qm->pdev->dev; in qm_dbg_help()
1419 static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf) in qm_cmd_write_dump() argument
1421 struct device *dev = &qm->pdev->dev; in qm_cmd_write_dump()
1437 ret = qm_sqc_dump(qm, s); in qm_cmd_write_dump()
1439 ret = qm_cqc_dump(qm, s); in qm_cmd_write_dump()
1441 ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc), in qm_cmd_write_dump()
1444 ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc), in qm_cmd_write_dump()
1447 ret = qm_sq_dump(qm, s); in qm_cmd_write_dump()
1449 ret = qm_cq_dump(qm, s); in qm_cmd_write_dump()
1451 ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE"); in qm_cmd_write_dump()
1453 ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE"); in qm_cmd_write_dump()
1455 ret = qm_dbg_help(qm, s); in qm_cmd_write_dump()
1471 struct hisi_qm *qm = filp->private_data; in qm_cmd_write() local
1479 if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) in qm_cmd_write()
1502 ret = qm_cmd_write_dump(qm, cmd_buf); in qm_cmd_write()
1520 static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index) in qm_create_debugfs_file() argument
1522 struct dentry *qm_d = qm->debug.qm_d; in qm_create_debugfs_file()
1523 struct debugfs_file *file = qm->debug.files + index; in qm_create_debugfs_file()
1530 file->debug = &qm->debug; in qm_create_debugfs_file()
1535 static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe) in qm_hw_error_init_v1() argument
1537 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v1()
1540 static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe) in qm_hw_error_init_v2() argument
1545 qm->error_mask = ce | nfe | fe; in qm_hw_error_init_v2()
1549 qm->io_base + QM_ABNORMAL_INT_SOURCE); in qm_hw_error_init_v2()
1552 writel(ce, qm->io_base + QM_RAS_CE_ENABLE); in qm_hw_error_init_v2()
1553 writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD); in qm_hw_error_init_v2()
1554 writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE); in qm_hw_error_init_v2()
1555 writel(fe, qm->io_base + QM_RAS_FE_ENABLE); in qm_hw_error_init_v2()
1557 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v2()
1558 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v2()
1561 static void qm_hw_error_uninit_v2(struct hisi_qm *qm) in qm_hw_error_uninit_v2() argument
1563 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v2()
1566 static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status) in qm_log_hw_error() argument
1569 struct device *dev = &qm->pdev->dev; in qm_log_hw_error()
1582 reg_val = readl(qm->io_base + QM_ABNORMAL_INF01); in qm_log_hw_error()
1589 reg_val = readl(qm->io_base + QM_ABNORMAL_INF00); in qm_log_hw_error()
1603 static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm) in qm_hw_error_handle_v2() argument
1608 tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); in qm_hw_error_handle_v2()
1609 error_status = qm->error_mask & tmp; in qm_hw_error_handle_v2()
1613 qm->err_status.is_qm_ecc_mbit = true; in qm_hw_error_handle_v2()
1615 qm_log_hw_error(qm, error_status); in qm_hw_error_handle_v2()
1617 writel(error_status, qm->io_base + in qm_hw_error_handle_v2()
1651 return qp->sqe + sq_tail * qp->qm->sqe_size; in qm_get_avail_sqe()
1654 static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) in qm_create_qp_nolock() argument
1656 struct device *dev = &qm->pdev->dev; in qm_create_qp_nolock()
1660 if (!qm_qp_avail_state(qm, NULL, QP_INIT)) in qm_create_qp_nolock()
1663 if (qm->qp_in_used == qm->qp_num) { in qm_create_qp_nolock()
1665 qm->qp_num); in qm_create_qp_nolock()
1666 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); in qm_create_qp_nolock()
1670 qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC); in qm_create_qp_nolock()
1673 qm->qp_num); in qm_create_qp_nolock()
1674 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); in qm_create_qp_nolock()
1678 qp = &qm->qp_array[qp_id]; in qm_create_qp_nolock()
1686 qm->qp_in_used++; in qm_create_qp_nolock()
1700 struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) in hisi_qm_create_qp() argument
1704 down_write(&qm->qps_lock); in hisi_qm_create_qp()
1705 qp = qm_create_qp_nolock(qm, alg_type); in hisi_qm_create_qp()
1706 up_write(&qm->qps_lock); in hisi_qm_create_qp()
1720 struct hisi_qm *qm = qp->qm; in hisi_qm_release_qp() local
1722 down_write(&qm->qps_lock); in hisi_qm_release_qp()
1724 if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) { in hisi_qm_release_qp()
1725 up_write(&qm->qps_lock); in hisi_qm_release_qp()
1729 qm->qp_in_used--; in hisi_qm_release_qp()
1730 idr_remove(&qm->qp_idr, qp->qp_id); in hisi_qm_release_qp()
1732 up_write(&qm->qps_lock); in hisi_qm_release_qp()
1738 struct hisi_qm *qm = qp->qm; in qm_qp_ctx_cfg() local
1739 struct device *dev = &qm->pdev->dev; in qm_qp_ctx_cfg()
1740 enum qm_hw_ver ver = qm->ver; in qm_qp_ctx_cfg()
1761 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size)); in qm_qp_ctx_cfg()
1764 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size)); in qm_qp_ctx_cfg()
1770 ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0); in qm_qp_ctx_cfg()
1796 ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0); in qm_qp_ctx_cfg()
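qm_qp_ctx_cfg() fills the SQC/CQC context structures with cpu_to_le32() before handing them to the device through qm_mb(): device-visible fields stay little-endian regardless of host byte order. A user-space analogue using glibc's htole32() and a hypothetical cut-down context (the real qm_sqc has many more fields):

    #include <endian.h>                      /* glibc htole32()/le32toh() */
    #include <stdint.h>
    #include <stdio.h>

    struct fake_sqc {
        uint32_t base_l;                     /* little-endian on the wire */
        uint32_t base_h;
        uint32_t dw3;
    };

    int main(void)
    {
        uint64_t dma = 0x123456789abcull;    /* made-up DMA address */
        struct fake_sqc sqc = {
            .base_l = htole32((uint32_t)dma),         /* cpu_to_le32() */
            .base_h = htole32((uint32_t)(dma >> 32)),
            .dw3    = htole32(128),                   /* e.g. sqe_size */
        };

        printf("base_l=%#x base_h=%#x\n",
               le32toh(sqc.base_l), le32toh(sqc.base_h));
        return 0;
    }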
1805 struct hisi_qm *qm = qp->qm; in qm_start_qp_nolock() local
1806 struct device *dev = &qm->pdev->dev; in qm_start_qp_nolock()
1811 if (!qm_qp_avail_state(qm, qp, QP_START)) in qm_start_qp_nolock()
1834 struct hisi_qm *qm = qp->qm; in hisi_qm_start_qp() local
1837 down_write(&qm->qps_lock); in hisi_qm_start_qp()
1839 up_write(&qm->qps_lock); in hisi_qm_start_qp()
1852 struct hisi_qm *qm = qp->qm; in qm_drain_qp() local
1853 struct device *dev = &qm->pdev->dev; in qm_drain_qp()
1864 if (qm->err_status.is_qm_ecc_mbit || qm->err_status.is_dev_ecc_mbit) in qm_drain_qp()
1867 addr = qm_ctx_alloc(qm, size, &dma_addr); in qm_drain_qp()
1874 ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id); in qm_drain_qp()
1881 ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)), in qm_drain_qp()
1902 qm_ctx_free(qm, size, addr, &dma_addr); in qm_drain_qp()
1909 struct device *dev = &qp->qm->pdev->dev; in qm_stop_qp_nolock()
1923 if (!qm_qp_avail_state(qp->qm, qp, QP_STOP)) in qm_stop_qp_nolock()
1932 if (qp->qm->wq) in qm_stop_qp_nolock()
1933 flush_workqueue(qp->qm->wq); in qm_stop_qp_nolock()
1935 flush_work(&qp->qm->work); in qm_stop_qp_nolock()
1952 down_write(&qp->qm->qps_lock); in hisi_qm_stop_qp()
1954 up_write(&qp->qm->qps_lock); in hisi_qm_stop_qp()
1983 atomic_read(&qp->qm->status.flags) == QM_STOP || in hisi_qp_send()
1985 dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n"); in hisi_qp_send()
1992 memcpy(sqe, msg, qp->qm->sqe_size); in hisi_qp_send()
1994 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0); in hisi_qp_send()
2002 static void hisi_qm_cache_wb(struct hisi_qm *qm) in hisi_qm_cache_wb() argument
2006 if (qm->ver == QM_HW_V1) in hisi_qm_cache_wb()
2009 writel(0x1, qm->io_base + QM_CACHE_WB_START); in hisi_qm_cache_wb()
2010 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, in hisi_qm_cache_wb()
2012 dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n"); in hisi_qm_cache_wb()
2029 struct hisi_qm *qm = uacce->priv; in hisi_qm_uacce_get_queue() local
2033 qp = hisi_qm_create_qp(qm, alg_type); in hisi_qm_uacce_get_queue()
2050 hisi_qm_cache_wb(qp->qm); in hisi_qm_uacce_put_queue()
2060 struct hisi_qm *qm = qp->qm; in hisi_qm_uacce_mmap() local
2062 struct pci_dev *pdev = qm->pdev; in hisi_qm_uacce_mmap()
2069 if (qm->ver == QM_HW_V1) { in hisi_qm_uacce_mmap()
2081 qm->phys_base >> PAGE_SHIFT, in hisi_qm_uacce_mmap()
2117 struct hisi_qm *qm = q->uacce->priv; in qm_set_sqctype() local
2120 down_write(&qm->qps_lock); in qm_set_sqctype()
2122 up_write(&qm->qps_lock); in qm_set_sqctype()
2164 static int qm_alloc_uacce(struct hisi_qm *qm) in qm_alloc_uacce() argument
2166 struct pci_dev *pdev = qm->pdev; in qm_alloc_uacce()
2186 qm->use_sva = true; in qm_alloc_uacce()
2190 qm->uacce = NULL; in qm_alloc_uacce()
2195 uacce->priv = qm; in qm_alloc_uacce()
2196 uacce->algs = qm->algs; in qm_alloc_uacce()
2198 if (qm->ver == QM_HW_V1) { in qm_alloc_uacce()
2207 dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH + in qm_alloc_uacce()
2213 qm->uacce = uacce; in qm_alloc_uacce()
2225 static int qm_frozen(struct hisi_qm *qm) in qm_frozen() argument
2227 down_write(&qm->qps_lock); in qm_frozen()
2229 if (qm->is_frozen) { in qm_frozen()
2230 up_write(&qm->qps_lock); in qm_frozen()
2234 if (!qm->qp_in_used) { in qm_frozen()
2235 qm->qp_in_used = qm->qp_num; in qm_frozen()
2236 qm->is_frozen = true; in qm_frozen()
2237 up_write(&qm->qps_lock); in qm_frozen()
2241 up_write(&qm->qps_lock); in qm_frozen()
2249 struct hisi_qm *qm, *vf_qm; in qm_try_frozen_vfs() local
2258 list_for_each_entry(qm, &qm_list->list, list) { in qm_try_frozen_vfs()
2259 dev = qm->pdev; in qm_try_frozen_vfs()
2282 void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list) in hisi_qm_wait_task_finish() argument
2284 while (qm_frozen(qm) || in hisi_qm_wait_task_finish()
2285 ((qm->fun_type == QM_HW_PF) && in hisi_qm_wait_task_finish()
2286 qm_try_frozen_vfs(qm->pdev, qm_list))) { in hisi_qm_wait_task_finish()
2300 int hisi_qm_get_free_qp_num(struct hisi_qm *qm) in hisi_qm_get_free_qp_num() argument
2304 down_read(&qm->qps_lock); in hisi_qm_get_free_qp_num()
2305 ret = qm->qp_num - qm->qp_in_used; in hisi_qm_get_free_qp_num()
2306 up_read(&qm->qps_lock); in hisi_qm_get_free_qp_num()
2312 static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num) in hisi_qp_memory_uninit() argument
2314 struct device *dev = &qm->pdev->dev; in hisi_qp_memory_uninit()
2319 qdma = &qm->qp_array[i].qdma; in hisi_qp_memory_uninit()
2323 kfree(qm->qp_array); in hisi_qp_memory_uninit()
2326 static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id) in hisi_qp_memory_init() argument
2328 struct device *dev = &qm->pdev->dev; in hisi_qp_memory_init()
2329 size_t off = qm->sqe_size * QM_Q_DEPTH; in hisi_qp_memory_init()
2332 qp = &qm->qp_array[id]; in hisi_qp_memory_init()
2343 qp->qm = qm; in hisi_qp_memory_init()
2349 static int hisi_qm_memory_init(struct hisi_qm *qm) in hisi_qm_memory_init() argument
2351 struct device *dev = &qm->pdev->dev; in hisi_qm_memory_init()
2355 #define QM_INIT_BUF(qm, type, num) do { \ in hisi_qm_memory_init() argument
2356 (qm)->type = ((qm)->qdma.va + (off)); \ in hisi_qm_memory_init()
2357 (qm)->type##_dma = (qm)->qdma.dma + (off); \ in hisi_qm_memory_init()
2361 idr_init(&qm->qp_idr); in hisi_qm_memory_init()
2362 qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) + in hisi_qm_memory_init()
2364 QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + in hisi_qm_memory_init()
2365 QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); in hisi_qm_memory_init()
2366 qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma, in hisi_qm_memory_init()
2368 dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size); in hisi_qm_memory_init()
2369 if (!qm->qdma.va) in hisi_qm_memory_init()
2372 QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH); in hisi_qm_memory_init()
2373 QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH); in hisi_qm_memory_init()
2374 QM_INIT_BUF(qm, sqc, qm->qp_num); in hisi_qm_memory_init()
2375 QM_INIT_BUF(qm, cqc, qm->qp_num); in hisi_qm_memory_init()
2377 qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL); in hisi_qm_memory_init()
2378 if (!qm->qp_array) { in hisi_qm_memory_init()
2384 qp_dma_size = qm->sqe_size * QM_Q_DEPTH + in hisi_qm_memory_init()
2387 for (i = 0; i < qm->qp_num; i++) { in hisi_qm_memory_init()
2388 ret = hisi_qp_memory_init(qm, qp_dma_size, i); in hisi_qm_memory_init()
2398 hisi_qp_memory_uninit(qm, i); in hisi_qm_memory_init()
2400 dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); in hisi_qm_memory_init()
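hisi_qm_memory_init() makes a single dma_alloc_coherent() call sized for the EQE, AEQE, SQC and CQC regions (each QMC_ALIGN()ed), and the QM_INIT_BUF() macro carves va/dma pairs out of it at a running offset. A malloc()-based analogue of the carving, where the sizes and 32-byte alignment are stand-ins:

    #include <stdio.h>
    #include <stdlib.h>

    #define ALIGN32(x) (((x) + 31) & ~(size_t)31)    /* QMC_ALIGN stand-in */

    /* carve one sub-buffer out of a single backing allocation */
    #define INIT_BUF(base, field, sz, off) do {  \
        (field) = (base) + (off);                \
        (off) += ALIGN32(sz);                    \
    } while (0)

    int main(void)
    {
        size_t off = 0;
        unsigned char *eqe, *aeqe, *sqc, *cqc;
        size_t total = ALIGN32(16 * 64) + ALIGN32(16 * 64) +
                       ALIGN32(4 * 32) + ALIGN32(4 * 32);  /* made-up sizes */
        unsigned char *base = malloc(total);     /* dma_alloc_coherent() analogue */

        if (!base)
            return 1;
        INIT_BUF(base, eqe,  16 * 64, off);
        INIT_BUF(base, aeqe, 16 * 64, off);
        INIT_BUF(base, sqc,  4 * 32,  off);
        INIT_BUF(base, cqc,  4 * 32,  off);
        printf("offsets: eqe=%td aeqe=%td sqc=%td cqc=%td\n",
               eqe - base, aeqe - base, sqc - base, cqc - base);
        free(base);
        return 0;
    }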
2405 static void hisi_qm_pre_init(struct hisi_qm *qm) in hisi_qm_pre_init() argument
2407 struct pci_dev *pdev = qm->pdev; in hisi_qm_pre_init()
2409 if (qm->ver == QM_HW_V1) in hisi_qm_pre_init()
2410 qm->ops = &qm_hw_ops_v1; in hisi_qm_pre_init()
2412 qm->ops = &qm_hw_ops_v2; in hisi_qm_pre_init()
2414 pci_set_drvdata(pdev, qm); in hisi_qm_pre_init()
2415 mutex_init(&qm->mailbox_lock); in hisi_qm_pre_init()
2416 init_rwsem(&qm->qps_lock); in hisi_qm_pre_init()
2417 qm->qp_in_used = 0; in hisi_qm_pre_init()
2418 qm->is_frozen = false; in hisi_qm_pre_init()
2427 void hisi_qm_uninit(struct hisi_qm *qm) in hisi_qm_uninit() argument
2429 struct pci_dev *pdev = qm->pdev; in hisi_qm_uninit()
2432 down_write(&qm->qps_lock); in hisi_qm_uninit()
2434 if (!qm_avail_state(qm, QM_CLOSE)) { in hisi_qm_uninit()
2435 up_write(&qm->qps_lock); in hisi_qm_uninit()
2439 uacce_remove(qm->uacce); in hisi_qm_uninit()
2440 qm->uacce = NULL; in hisi_qm_uninit()
2442 hisi_qp_memory_uninit(qm, qm->qp_num); in hisi_qm_uninit()
2443 idr_destroy(&qm->qp_idr); in hisi_qm_uninit()
2445 if (qm->qdma.va) { in hisi_qm_uninit()
2446 hisi_qm_cache_wb(qm); in hisi_qm_uninit()
2447 dma_free_coherent(dev, qm->qdma.size, in hisi_qm_uninit()
2448 qm->qdma.va, qm->qdma.dma); in hisi_qm_uninit()
2449 memset(&qm->qdma, 0, sizeof(qm->qdma)); in hisi_qm_uninit()
2452 qm_irq_unregister(qm); in hisi_qm_uninit()
2454 iounmap(qm->io_base); in hisi_qm_uninit()
2458 up_write(&qm->qps_lock); in hisi_qm_uninit()
2474 int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number) in hisi_qm_get_vft() argument
2479 if (!qm->ops->get_vft) { in hisi_qm_get_vft()
2480 dev_err(&qm->pdev->dev, "Don't support vft read!\n"); in hisi_qm_get_vft()
2484 return qm->ops->get_vft(qm, base, number); in hisi_qm_get_vft()
2496 static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, in hisi_qm_set_vft() argument
2499 u32 max_q_num = qm->ctrl_qp_num; in hisi_qm_set_vft()
2505 return qm_set_sqc_cqc_vft(qm, fun_num, base, number); in hisi_qm_set_vft()
2508 static void qm_init_eq_aeq_status(struct hisi_qm *qm) in qm_init_eq_aeq_status() argument
2510 struct hisi_qm_status *status = &qm->status; in qm_init_eq_aeq_status()
2518 static int qm_eq_ctx_cfg(struct hisi_qm *qm) in qm_eq_ctx_cfg() argument
2520 struct device *dev = &qm->pdev->dev; in qm_eq_ctx_cfg()
2527 qm_init_eq_aeq_status(qm); in qm_eq_ctx_cfg()
2539 eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma)); in qm_eq_ctx_cfg()
2540 eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); in qm_eq_ctx_cfg()
2541 if (qm->ver == QM_HW_V1) in qm_eq_ctx_cfg()
2544 ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0); in qm_eq_ctx_cfg()
2560 aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); in qm_eq_ctx_cfg()
2561 aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); in qm_eq_ctx_cfg()
2564 ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0); in qm_eq_ctx_cfg()
2571 static int __hisi_qm_start(struct hisi_qm *qm) in __hisi_qm_start() argument
2575 WARN_ON(!qm->qdma.dma); in __hisi_qm_start()
2577 if (qm->fun_type == QM_HW_PF) { in __hisi_qm_start()
2578 ret = qm_dev_mem_reset(qm); in __hisi_qm_start()
2582 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); in __hisi_qm_start()
2587 ret = qm_eq_ctx_cfg(qm); in __hisi_qm_start()
2591 ret = qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); in __hisi_qm_start()
2595 ret = qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); in __hisi_qm_start()
2599 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK); in __hisi_qm_start()
2600 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK); in __hisi_qm_start()
2611 int hisi_qm_start(struct hisi_qm *qm) in hisi_qm_start() argument
2613 struct device *dev = &qm->pdev->dev; in hisi_qm_start()
2616 down_write(&qm->qps_lock); in hisi_qm_start()
2618 if (!qm_avail_state(qm, QM_START)) { in hisi_qm_start()
2619 up_write(&qm->qps_lock); in hisi_qm_start()
2623 dev_dbg(dev, "qm start with %d queue pairs\n", qm->qp_num); in hisi_qm_start()
2625 if (!qm->qp_num) { in hisi_qm_start()
2631 ret = __hisi_qm_start(qm); in hisi_qm_start()
2633 atomic_set(&qm->status.flags, QM_START); in hisi_qm_start()
2636 up_write(&qm->qps_lock); in hisi_qm_start()
2641 static int qm_restart(struct hisi_qm *qm) in qm_restart() argument
2643 struct device *dev = &qm->pdev->dev; in qm_restart()
2647 ret = hisi_qm_start(qm); in qm_restart()
2651 down_write(&qm->qps_lock); in qm_restart()
2652 for (i = 0; i < qm->qp_num; i++) { in qm_restart()
2653 qp = &qm->qp_array[i]; in qm_restart()
2660 up_write(&qm->qps_lock); in qm_restart()
2666 up_write(&qm->qps_lock); in qm_restart()
2672 static int qm_stop_started_qp(struct hisi_qm *qm) in qm_stop_started_qp() argument
2674 struct device *dev = &qm->pdev->dev; in qm_stop_started_qp()
2678 for (i = 0; i < qm->qp_num; i++) { in qm_stop_started_qp()
2679 qp = &qm->qp_array[i]; in qm_stop_started_qp()
2697 static void qm_clear_queues(struct hisi_qm *qm) in qm_clear_queues() argument
2702 for (i = 0; i < qm->qp_num; i++) { in qm_clear_queues()
2703 qp = &qm->qp_array[i]; in qm_clear_queues()
2708 memset(qm->qdma.va, 0, qm->qdma.size); in qm_clear_queues()
2720 int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r) in hisi_qm_stop() argument
2722 struct device *dev = &qm->pdev->dev; in hisi_qm_stop()
2725 down_write(&qm->qps_lock); in hisi_qm_stop()
2727 qm->status.stop_reason = r; in hisi_qm_stop()
2728 if (!qm_avail_state(qm, QM_STOP)) { in hisi_qm_stop()
2733 if (qm->status.stop_reason == QM_SOFT_RESET || in hisi_qm_stop()
2734 qm->status.stop_reason == QM_FLR) { in hisi_qm_stop()
2735 ret = qm_stop_started_qp(qm); in hisi_qm_stop()
2743 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); in hisi_qm_stop()
2744 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); in hisi_qm_stop()
2746 if (qm->fun_type == QM_HW_PF) { in hisi_qm_stop()
2747 ret = hisi_qm_set_vft(qm, 0, 0, 0); in hisi_qm_stop()
2755 qm_clear_queues(qm); in hisi_qm_stop()
2756 atomic_set(&qm->status.flags, QM_STOP); in hisi_qm_stop()
2759 up_write(&qm->qps_lock); in hisi_qm_stop()
2767 struct hisi_qm *qm = filp->private_data; in qm_status_read() local
2771 val = atomic_read(&qm->status.flags); in qm_status_read()
2809 int hisi_qm_debug_init(struct hisi_qm *qm) in hisi_qm_debug_init() argument
2811 struct qm_dfx *dfx = &qm->debug.dfx; in hisi_qm_debug_init()
2816 qm_d = debugfs_create_dir("qm", qm->debug.debug_root); in hisi_qm_debug_init()
2817 qm->debug.qm_d = qm_d; in hisi_qm_debug_init()
2820 if (qm->fun_type == QM_HW_PF) in hisi_qm_debug_init()
2822 if (qm_create_debugfs_file(qm, i)) { in hisi_qm_debug_init()
2827 debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); in hisi_qm_debug_init()
2829 debugfs_create_file("cmd", 0444, qm->debug.qm_d, qm, &qm_cmd_fops); in hisi_qm_debug_init()
2831 debugfs_create_file("status", 0444, qm->debug.qm_d, qm, in hisi_qm_debug_init()
2854 void hisi_qm_debug_regs_clear(struct hisi_qm *qm) in hisi_qm_debug_regs_clear() argument
2860 writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); in hisi_qm_debug_regs_clear()
2861 writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); in hisi_qm_debug_regs_clear()
2867 writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE); in hisi_qm_debug_regs_clear()
2871 readl(qm->io_base + regs->reg_offset); in hisi_qm_debug_regs_clear()
2875 writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE); in hisi_qm_debug_regs_clear()
2879 static void qm_hw_error_init(struct hisi_qm *qm) in qm_hw_error_init() argument
2881 const struct hisi_qm_err_info *err_info = &qm->err_ini->err_info; in qm_hw_error_init()
2883 if (!qm->ops->hw_error_init) { in qm_hw_error_init()
2884 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n"); in qm_hw_error_init()
2888 qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe); in qm_hw_error_init()
2891 static void qm_hw_error_uninit(struct hisi_qm *qm) in qm_hw_error_uninit() argument
2893 if (!qm->ops->hw_error_uninit) { in qm_hw_error_uninit()
2894 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n"); in qm_hw_error_uninit()
2898 qm->ops->hw_error_uninit(qm); in qm_hw_error_uninit()
2901 static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm) in qm_hw_error_handle() argument
2903 if (!qm->ops->hw_error_handle) { in qm_hw_error_handle()
2904 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n"); in qm_hw_error_handle()
2908 return qm->ops->hw_error_handle(qm); in qm_hw_error_handle()
2917 void hisi_qm_dev_err_init(struct hisi_qm *qm) in hisi_qm_dev_err_init() argument
2919 if (qm->fun_type == QM_HW_VF) in hisi_qm_dev_err_init()
2922 qm_hw_error_init(qm); in hisi_qm_dev_err_init()
2924 if (!qm->err_ini->hw_err_enable) { in hisi_qm_dev_err_init()
2925 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n"); in hisi_qm_dev_err_init()
2928 qm->err_ini->hw_err_enable(qm); in hisi_qm_dev_err_init()
2938 void hisi_qm_dev_err_uninit(struct hisi_qm *qm) in hisi_qm_dev_err_uninit() argument
2940 if (qm->fun_type == QM_HW_VF) in hisi_qm_dev_err_uninit()
2943 qm_hw_error_uninit(qm); in hisi_qm_dev_err_uninit()
2945 if (!qm->err_ini->hw_err_disable) { in hisi_qm_dev_err_uninit()
2946 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n"); in hisi_qm_dev_err_uninit()
2949 qm->err_ini->hw_err_disable(qm); in hisi_qm_dev_err_uninit()
2984 struct hisi_qm *qm; in hisi_qm_sort_devices() local
2989 list_for_each_entry(qm, &qm_list->list, list) { in hisi_qm_sort_devices()
2990 dev = &qm->pdev->dev; in hisi_qm_sort_devices()
3002 res->qm = qm; in hisi_qm_sort_devices()
3048 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type); in hisi_qm_alloc_qps_node()
3072 static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs) in qm_vf_q_assign() argument
3075 u32 q_base = qm->qp_num; in qm_vf_q_assign()
3081 remain_q_num = qm->ctrl_qp_num - qm->qp_num; in qm_vf_q_assign()
3084 if (qm->ctrl_qp_num < qm->qp_num || remain_q_num < num_vfs) in qm_vf_q_assign()
3091 ret = hisi_qm_set_vft(qm, i, q_base, q_num); in qm_vf_q_assign()
3094 hisi_qm_set_vft(qm, j, 0, 0); in qm_vf_q_assign()
3103 static int qm_clear_vft_config(struct hisi_qm *qm) in qm_clear_vft_config() argument
3108 for (i = 1; i <= qm->vfs_num; i++) { in qm_clear_vft_config()
3109 ret = hisi_qm_set_vft(qm, i, 0, 0); in qm_clear_vft_config()
3113 qm->vfs_num = 0; in qm_clear_vft_config()
3129 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_sriov_enable() local
3141 ret = qm_vf_q_assign(qm, num_vfs); in hisi_qm_sriov_enable()
3147 qm->vfs_num = num_vfs; in hisi_qm_sriov_enable()
3152 qm_clear_vft_config(qm); in hisi_qm_sriov_enable()
3171 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_sriov_disable() local
3179 if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) { in hisi_qm_sriov_disable()
3185 return qm_clear_vft_config(qm); in hisi_qm_sriov_disable()
3205 static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm) in qm_dev_err_handle() argument
3209 if (!qm->err_ini->get_dev_hw_err_status) { in qm_dev_err_handle()
3210 dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n"); in qm_dev_err_handle()
3215 err_sts = qm->err_ini->get_dev_hw_err_status(qm); in qm_dev_err_handle()
3217 if (err_sts & qm->err_ini->err_info.ecc_2bits_mask) in qm_dev_err_handle()
3218 qm->err_status.is_dev_ecc_mbit = true; in qm_dev_err_handle()
3220 if (!qm->err_ini->log_dev_hw_err) { in qm_dev_err_handle()
3221 dev_err(&qm->pdev->dev, "Device doesn't support log hw error!\n"); in qm_dev_err_handle()
3225 qm->err_ini->log_dev_hw_err(qm, err_sts); in qm_dev_err_handle()
3232 static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm) in qm_process_dev_error() argument
3237 qm_ret = qm_hw_error_handle(qm); in qm_process_dev_error()
3240 dev_ret = qm_dev_err_handle(qm); in qm_process_dev_error()
3258 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_dev_err_detected() local
3268 ret = qm_process_dev_error(qm); in hisi_qm_dev_err_detected()
3276 static int qm_get_hw_error_status(struct hisi_qm *qm) in qm_get_hw_error_status() argument
3278 return readl(qm->io_base + QM_ABNORMAL_INT_STATUS); in qm_get_hw_error_status()
3281 static int qm_check_req_recv(struct hisi_qm *qm) in qm_check_req_recv() argument
3283 struct pci_dev *pdev = qm->pdev; in qm_check_req_recv()
3287 writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID); in qm_check_req_recv()
3288 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, in qm_check_req_recv()
3296 writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID); in qm_check_req_recv()
3297 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, in qm_check_req_recv()
3306 static int qm_set_pf_mse(struct hisi_qm *qm, bool set) in qm_set_pf_mse() argument
3308 struct pci_dev *pdev = qm->pdev; in qm_set_pf_mse()
3330 static int qm_set_vf_mse(struct hisi_qm *qm, bool set) in qm_set_vf_mse() argument
3332 struct pci_dev *pdev = qm->pdev; in qm_set_vf_mse()
3357 static int qm_set_msi(struct hisi_qm *qm, bool set) in qm_set_msi() argument
3359 struct pci_dev *pdev = qm->pdev; in qm_set_msi()
3367 if (qm->err_status.is_qm_ecc_mbit || in qm_set_msi()
3368 qm->err_status.is_dev_ecc_mbit) in qm_set_msi()
3372 if (readl(qm->io_base + QM_PEH_DFX_INFO0)) in qm_set_msi()
3379 static int qm_vf_reset_prepare(struct hisi_qm *qm, in qm_vf_reset_prepare() argument
3382 struct hisi_qm_list *qm_list = qm->qm_list; in qm_vf_reset_prepare()
3383 struct pci_dev *pdev = qm->pdev; in qm_vf_reset_prepare()
3409 static int qm_reset_prepare_ready(struct hisi_qm *qm) in qm_reset_prepare_ready() argument
3411 struct pci_dev *pdev = qm->pdev; in qm_reset_prepare_ready()
3425 static int qm_controller_reset_prepare(struct hisi_qm *qm) in qm_controller_reset_prepare() argument
3427 struct pci_dev *pdev = qm->pdev; in qm_controller_reset_prepare()
3430 ret = qm_reset_prepare_ready(qm); in qm_controller_reset_prepare()
3436 if (qm->vfs_num) { in qm_controller_reset_prepare()
3437 ret = qm_vf_reset_prepare(qm, QM_SOFT_RESET); in qm_controller_reset_prepare()
3444 ret = hisi_qm_stop(qm, QM_SOFT_RESET); in qm_controller_reset_prepare()
3453 static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) in qm_dev_ecc_mbit_handle() argument
3457 if (!qm->err_status.is_dev_ecc_mbit && in qm_dev_ecc_mbit_handle()
3458 qm->err_status.is_qm_ecc_mbit && in qm_dev_ecc_mbit_handle()
3459 qm->err_ini->close_axi_master_ooo) { in qm_dev_ecc_mbit_handle()
3461 qm->err_ini->close_axi_master_ooo(qm); in qm_dev_ecc_mbit_handle()
3463 } else if (qm->err_status.is_dev_ecc_mbit && in qm_dev_ecc_mbit_handle()
3464 !qm->err_status.is_qm_ecc_mbit && in qm_dev_ecc_mbit_handle()
3465 !qm->err_ini->close_axi_master_ooo) { in qm_dev_ecc_mbit_handle()
3467 nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); in qm_dev_ecc_mbit_handle()
3469 qm->io_base + QM_RAS_NFE_ENABLE); in qm_dev_ecc_mbit_handle()
3470 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); in qm_dev_ecc_mbit_handle()
3474 static int qm_soft_reset(struct hisi_qm *qm) in qm_soft_reset() argument
3476 struct pci_dev *pdev = qm->pdev; in qm_soft_reset()
3481 ret = qm_check_req_recv(qm); in qm_soft_reset()
3485 if (qm->vfs_num) { in qm_soft_reset()
3486 ret = qm_set_vf_mse(qm, false); in qm_soft_reset()
3493 ret = qm_set_msi(qm, false); in qm_soft_reset()
3499 qm_dev_ecc_mbit_handle(qm); in qm_soft_reset()
3503 qm->io_base + ACC_MASTER_GLOBAL_CTRL); in qm_soft_reset()
3506 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, in qm_soft_reset()
3515 ret = qm_set_pf_mse(qm, false); in qm_soft_reset()
3527 qm->err_ini->err_info.acpi_rst, in qm_soft_reset()
3546 static int qm_vf_reset_done(struct hisi_qm *qm) in qm_vf_reset_done() argument
3548 struct hisi_qm_list *qm_list = qm->qm_list; in qm_vf_reset_done()
3549 struct pci_dev *pdev = qm->pdev; in qm_vf_reset_done()
3575 static int qm_get_dev_err_status(struct hisi_qm *qm) in qm_get_dev_err_status() argument
3577 return qm->err_ini->get_dev_hw_err_status(qm); in qm_get_dev_err_status()
3580 static int qm_dev_hw_init(struct hisi_qm *qm) in qm_dev_hw_init() argument
3582 return qm->err_ini->hw_init(qm); in qm_dev_hw_init()
3585 static void qm_restart_prepare(struct hisi_qm *qm) in qm_restart_prepare() argument
3589 if (!qm->err_status.is_qm_ecc_mbit && in qm_restart_prepare()
3590 !qm->err_status.is_dev_ecc_mbit) in qm_restart_prepare()
3594 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_prepare()
3595 writel(value & ~qm->err_ini->err_info.msi_wr_port, in qm_restart_prepare()
3596 qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_prepare()
3599 value = qm_get_dev_err_status(qm) & in qm_restart_prepare()
3600 qm->err_ini->err_info.ecc_2bits_mask; in qm_restart_prepare()
3601 if (value && qm->err_ini->clear_dev_hw_err_status) in qm_restart_prepare()
3602 qm->err_ini->clear_dev_hw_err_status(qm, value); in qm_restart_prepare()
3605 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE); in qm_restart_prepare()
3608 writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS); in qm_restart_prepare()
3610 if (qm->err_ini->open_axi_master_ooo) in qm_restart_prepare()
3611 qm->err_ini->open_axi_master_ooo(qm); in qm_restart_prepare()
3614 static void qm_restart_done(struct hisi_qm *qm) in qm_restart_done() argument
3618 if (!qm->err_status.is_qm_ecc_mbit && in qm_restart_done()
3619 !qm->err_status.is_dev_ecc_mbit) in qm_restart_done()
3623 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_done()
3624 value |= qm->err_ini->err_info.msi_wr_port; in qm_restart_done()
3625 writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_done()
3627 qm->err_status.is_qm_ecc_mbit = false; in qm_restart_done()
3628 qm->err_status.is_dev_ecc_mbit = false; in qm_restart_done()
3631 static int qm_controller_reset_done(struct hisi_qm *qm) in qm_controller_reset_done() argument
3633 struct pci_dev *pdev = qm->pdev; in qm_controller_reset_done()
3636 ret = qm_set_msi(qm, true); in qm_controller_reset_done()
3642 ret = qm_set_pf_mse(qm, true); in qm_controller_reset_done()
3648 if (qm->vfs_num) { in qm_controller_reset_done()
3649 ret = qm_set_vf_mse(qm, true); in qm_controller_reset_done()
3656 ret = qm_dev_hw_init(qm); in qm_controller_reset_done()
3662 qm_restart_prepare(qm); in qm_controller_reset_done()
3664 ret = qm_restart(qm); in qm_controller_reset_done()
3670 if (qm->vfs_num) { in qm_controller_reset_done()
3671 ret = qm_vf_q_assign(qm, qm->vfs_num); in qm_controller_reset_done()
3678 ret = qm_vf_reset_done(qm); in qm_controller_reset_done()
3684 hisi_qm_dev_err_init(qm); in qm_controller_reset_done()
3685 qm_restart_done(qm); in qm_controller_reset_done()
3687 clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag); in qm_controller_reset_done()
3692 static int qm_controller_reset(struct hisi_qm *qm) in qm_controller_reset() argument
3694 struct pci_dev *pdev = qm->pdev; in qm_controller_reset()
3699 ret = qm_controller_reset_prepare(qm); in qm_controller_reset()
3703 ret = qm_soft_reset(qm); in qm_controller_reset()
3709 ret = qm_controller_reset_done(qm); in qm_controller_reset()
3727 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_dev_slot_reset() local
3736 ret = qm_controller_reset(qm); in hisi_qm_dev_slot_reset()
3747 static int qm_check_dev_error(struct hisi_qm *qm) in qm_check_dev_error() argument
3751 if (qm->fun_type == QM_HW_VF) in qm_check_dev_error()
3754 ret = qm_get_hw_error_status(qm) & QM_ECC_MBIT; in qm_check_dev_error()
3758 return (qm_get_dev_err_status(qm) & in qm_check_dev_error()
3759 qm->err_ini->err_info.ecc_2bits_mask); in qm_check_dev_error()
3765 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_reset_prepare() local
3781 ret = qm_reset_prepare_ready(qm); in hisi_qm_reset_prepare()
3787 if (qm->vfs_num) { in hisi_qm_reset_prepare()
3788 ret = qm_vf_reset_prepare(qm, QM_FLR); in hisi_qm_reset_prepare()
3796 ret = hisi_qm_stop(qm, QM_FLR); in hisi_qm_reset_prepare()
3809 struct hisi_qm *qm = pci_get_drvdata(pf_pdev); in qm_flr_reset_complete() local
3812 pci_read_config_dword(qm->pdev, PCI_COMMAND, &id); in qm_flr_reset_complete()
3818 clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag); in qm_flr_reset_complete()
3826 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_reset_done() local
3831 ret = qm_restart(qm); in hisi_qm_reset_done()
3837 if (qm->fun_type == QM_HW_PF) { in hisi_qm_reset_done()
3838 ret = qm_dev_hw_init(qm); in hisi_qm_reset_done()
3844 if (!qm->vfs_num) in hisi_qm_reset_done()
3847 ret = qm_vf_q_assign(qm, qm->vfs_num); in hisi_qm_reset_done()
3853 ret = qm_vf_reset_done(qm); in hisi_qm_reset_done()
3868 struct hisi_qm *qm = data; in qm_abnormal_irq() local
3871 atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt); in qm_abnormal_irq()
3872 ret = qm_process_dev_error(qm); in qm_abnormal_irq()
3874 schedule_work(&qm->rst_work); in qm_abnormal_irq()
3879 static int qm_irq_register(struct hisi_qm *qm) in qm_irq_register() argument
3881 struct pci_dev *pdev = qm->pdev; in qm_irq_register()
3885 qm_irq, IRQF_SHARED, qm->dev_name, qm); in qm_irq_register()
3889 if (qm->ver != QM_HW_V1) { in qm_irq_register()
3891 qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm); in qm_irq_register()
3895 if (qm->fun_type == QM_HW_PF) { in qm_irq_register()
3899 qm->dev_name, qm); in qm_irq_register()
3908 free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm); in qm_irq_register()
3910 free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm); in qm_irq_register()
3922 struct hisi_qm *qm = pci_get_drvdata(pdev); in hisi_qm_dev_shutdown() local
3925 ret = hisi_qm_stop(qm, QM_NORMAL); in hisi_qm_dev_shutdown()
3933 struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work); in hisi_qm_controller_reset() local
3937 ret = qm_controller_reset(qm); in hisi_qm_controller_reset()
3939 dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret); in hisi_qm_controller_reset()
3951 int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list) in hisi_qm_alg_register() argument
3959 list_add_tail(&qm->list, &qm_list->list); in hisi_qm_alg_register()
3966 list_del(&qm->list); in hisi_qm_alg_register()
3984 void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list) in hisi_qm_alg_unregister() argument
3987 list_del(&qm->list); in hisi_qm_alg_unregister()
4001 int hisi_qm_init(struct hisi_qm *qm) in hisi_qm_init() argument
4003 struct pci_dev *pdev = qm->pdev; in hisi_qm_init()
4008 hisi_qm_pre_init(qm); in hisi_qm_init()
4010 ret = qm_alloc_uacce(qm); in hisi_qm_init()
4020 ret = pci_request_mem_regions(pdev, qm->dev_name); in hisi_qm_init()
4026 qm->phys_base = pci_resource_start(pdev, PCI_BAR_2); in hisi_qm_init()
4027 qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2); in hisi_qm_init()
4028 qm->io_base = ioremap(qm->phys_base, qm->phys_size); in hisi_qm_init()
4029 if (!qm->io_base) { in hisi_qm_init()
4039 if (!qm->ops->get_irq_num) { in hisi_qm_init()
4043 num_vec = qm->ops->get_irq_num(qm); in hisi_qm_init()
4050 ret = qm_irq_register(qm); in hisi_qm_init()
4054 if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) { in hisi_qm_init()
4056 ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); in hisi_qm_init()
4061 ret = hisi_qm_memory_init(qm); in hisi_qm_init()
4065 INIT_WORK(&qm->work, qm_work_process); in hisi_qm_init()
4066 if (qm->fun_type == QM_HW_PF) in hisi_qm_init()
4067 INIT_WORK(&qm->rst_work, hisi_qm_controller_reset); in hisi_qm_init()
4069 atomic_set(&qm->status.flags, QM_INIT); in hisi_qm_init()
4074 qm_irq_unregister(qm); in hisi_qm_init()
4078 iounmap(qm->io_base); in hisi_qm_init()
4084 uacce_remove(qm->uacce); in hisi_qm_init()
4085 qm->uacce = NULL; in hisi_qm_init()
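The tail of the listing shows hisi_qm_init()'s setup order (uacce, PCI regions, ioremap, vectors, IRQs, VFT query on a VF, queue memory, work items) and its goto-based unwind, which releases resources in reverse. A skeletal sketch of that error-unwind shape; the step names are invented and one step is forced to fail to exercise the unwind:

    #include <stdio.h>

    /* stand-ins for the real setup steps in hisi_qm_init() */
    static int  map_regs(void)      { return 0; }
    static void unmap_regs(void)    { puts("unmap"); }
    static int  register_irqs(void) { return 0; }
    static void free_irqs(void)     { puts("free irqs"); }
    static int  init_memory(void)   { return -1; }  /* force the unwind path */

    int main(void)
    {
        int ret = map_regs();
        if (ret)
            goto out;
        ret = register_irqs();
        if (ret)
            goto err_unmap;
        ret = init_memory();
        if (ret)
            goto err_irq;       /* mirrors hisi_qm_init()'s err_irq_unregister */
        return 0;

    err_irq:
        free_irqs();
    err_unmap:
        unmap_regs();
    out:
        return 1;
    }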