Lines matching refs: qm (HiSilicon HPRE accelerator driver, hpre_main.c)
71 #define HPRE_ADDR(qm, offset) ((qm)->io_base + (offset)) argument
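HPRE_ADDR() is a thin convenience macro: it adds a register offset to the QM's ioremap()ed MMIO window, so every access below reduces to readl()/writel() on io_base + offset. A minimal sketch of the pattern, with struct hisi_qm cut down to the one field the macro touches (the real structure lives in the driver's qm.h):

#include <linux/io.h>

struct hisi_qm {
        void __iomem *io_base;  /* mapped PCI BAR with the QM/HPRE registers */
        /* ... many more fields in the real driver ... */
};

#define HPRE_ADDR(qm, offset)   ((qm)->io_base + (offset))

/* Typical read-modify-write through the macro. */
static void hpre_set_bits(struct hisi_qm *qm, unsigned long offset, u32 bits)
{
        u32 val = readl(HPRE_ADDR(qm, offset));

        writel(val | bits, HPRE_ADDR(qm, offset));
}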
217 static int hpre_cfg_by_dsm(struct hisi_qm *qm) in hpre_cfg_by_dsm() argument
219 struct device *dev = &qm->pdev->dev; in hpre_cfg_by_dsm()
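hpre_cfg_by_dsm() hands part of the device setup to platform firmware through an ACPI _DSM evaluation. A hedged sketch of that pattern using the generic ACPI helpers; the GUID string and the function index below are placeholders, not the driver's real values:

#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/uuid.h>

static int hpre_cfg_by_dsm_sketch(struct pci_dev *pdev)
{
        struct device *dev = &pdev->dev;
        union acpi_object *obj;
        guid_t guid;

        /* Placeholder UUID; the driver parses its own device-specific GUID. */
        if (guid_parse("00000000-0000-0000-0000-000000000000", &guid))
                return -EINVAL;

        /* rev 0, function index 1: illustrative _DSM arguments. */
        obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, 0, 1, NULL);
        if (!obj) {
                dev_err(dev, "_DSM evaluation failed\n");
                return -EIO;
        }

        ACPI_FREE(obj);

        return 0;
}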
246 static void disable_flr_of_bme(struct hisi_qm *qm) in disable_flr_of_bme() argument
250 val = readl(HPRE_ADDR(qm, QM_PEH_AXUSER_CFG)); in disable_flr_of_bme()
253 writel(val, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG)); in disable_flr_of_bme()
254 writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE)); in disable_flr_of_bme()
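disable_flr_of_bme() is a read-modify-write with a latch: adjust the FLR-related bits in QM_PEH_AXUSER_CFG, then write the companion *_ENABLE register so the hardware applies the new value. The same shape as a sketch; the two bit masks are assumptions standing in for the driver's constants:

#include <linux/bits.h>

#define QM_BME_FLR_BIT          BIT(7)   /* assumed: FLR on BME toggle */
#define QM_PM_FLR_BIT           BIT(11)  /* assumed: FLR on PM transition */

static void disable_flr_of_bme_sketch(struct hisi_qm *qm)
{
        u32 val;

        val = readl(HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
        val &= ~QM_BME_FLR_BIT;         /* don't reset the function on BME */
        val |= QM_PM_FLR_BIT;
        writel(val, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));

        /* Writing the *_ENABLE register latches the configuration above. */
        writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE));
}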
257 static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) in hpre_set_user_domain_and_cache() argument
259 struct device *dev = &qm->pdev->dev; in hpre_set_user_domain_and_cache()
264 writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_ARUSER_M_CFG_ENABLE)); in hpre_set_user_domain_and_cache()
265 writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_AWUSER_M_CFG_ENABLE)); in hpre_set_user_domain_and_cache()
266 writel_relaxed(HPRE_QM_AXI_CFG_MASK, HPRE_ADDR(qm, QM_AXI_M_CFG)); in hpre_set_user_domain_and_cache()
269 val = readl_relaxed(HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK)); in hpre_set_user_domain_and_cache()
271 writel_relaxed(val, HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK)); in hpre_set_user_domain_and_cache()
273 writel(0x1, HPRE_ADDR(qm, HPRE_TYPES_ENB)); in hpre_set_user_domain_and_cache()
274 writel(HPRE_QM_VFG_AX_MASK, HPRE_ADDR(qm, HPRE_VFG_AXCACHE)); in hpre_set_user_domain_and_cache()
275 writel(0x0, HPRE_ADDR(qm, HPRE_BD_ENDIAN)); in hpre_set_user_domain_and_cache()
276 writel(0x0, HPRE_ADDR(qm, HPRE_INT_MASK)); in hpre_set_user_domain_and_cache()
277 writel(0x0, HPRE_ADDR(qm, HPRE_RAS_ECC_1BIT_TH)); in hpre_set_user_domain_and_cache()
278 writel(0x0, HPRE_ADDR(qm, HPRE_POISON_BYPASS)); in hpre_set_user_domain_and_cache()
279 writel(0x0, HPRE_ADDR(qm, HPRE_COMM_CNT_CLR_CE)); in hpre_set_user_domain_and_cache()
280 writel(0x0, HPRE_ADDR(qm, HPRE_ECC_BYPASS)); in hpre_set_user_domain_and_cache()
282 writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_ARUSR_CFG)); in hpre_set_user_domain_and_cache()
283 writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_AWUSR_CFG)); in hpre_set_user_domain_and_cache()
284 writel(0x1, HPRE_ADDR(qm, HPRE_RDCHN_INI_CFG)); in hpre_set_user_domain_and_cache()
285 ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, HPRE_RDCHN_INI_ST), val, in hpre_set_user_domain_and_cache()
299 HPRE_ADDR(qm, offset + HPRE_CORE_ENB)); in hpre_set_user_domain_and_cache()
300 writel(0x1, HPRE_ADDR(qm, offset + HPRE_CORE_INI_CFG)); in hpre_set_user_domain_and_cache()
301 ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, offset + in hpre_set_user_domain_and_cache()
314 ret = hpre_cfg_by_dsm(qm); in hpre_set_user_domain_and_cache()
318 disable_flr_of_bme(qm); in hpre_set_user_domain_and_cache()
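Both hardware handshakes in hpre_set_user_domain_and_cache() (the read channel at line 284 and each cluster core at line 300) have the same shape: write 0x1 to an *_INI_CFG register to kick initialisation, then poll the matching *_INI_ST status register with readl_relaxed_poll_timeout(). A sketch of one handshake; the poll interval and timeout are illustrative values:

#include <linux/iopoll.h>

#define HPRE_POLL_INTVRL_US     10      /* illustrative */
#define HPRE_POLL_TMOUT_US      1000    /* illustrative */

static int hpre_wait_rd_chn_init(struct hisi_qm *qm)
{
        u32 val;
        int ret;

        writel(0x1, HPRE_ADDR(qm, HPRE_RDCHN_INI_CFG));

        /* Sleep-poll until bit 0 of the status register reports "done". */
        ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, HPRE_RDCHN_INI_ST),
                                         val, val & BIT(0),
                                         HPRE_POLL_INTVRL_US,
                                         HPRE_POLL_TMOUT_US);
        if (ret)
                dev_err(&qm->pdev->dev, "read channel init timed out\n");

        return ret;
}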
323 static void hpre_cnt_regs_clear(struct hisi_qm *qm) in hpre_cnt_regs_clear() argument
329 writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); in hpre_cnt_regs_clear()
330 writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); in hpre_cnt_regs_clear()
335 writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY); in hpre_cnt_regs_clear()
339 writel(0x0, qm->io_base + HPRE_CTRL_CNT_CLR_CE); in hpre_cnt_regs_clear()
341 hisi_qm_debug_regs_clear(qm); in hpre_cnt_regs_clear()
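The per-cluster clear at line 335 runs inside a loop over the HPRE clusters, whose register blocks sit at a fixed stride from a common base. A sketch of that loop; the cluster count, base, and stride named *_SKETCH are assumptions standing in for the driver's constants:

#define HPRE_CLUSTERS_NUM_SKETCH        4       /* assumed cluster count */
#define HPRE_CLSTR_BASE_SKETCH          0x3000  /* assumed first-cluster offset */
#define HPRE_CLSTR_STRIDE_SKETCH        0x1000  /* assumed per-cluster stride */

static void hpre_clear_cluster_cnt(struct hisi_qm *qm)
{
        unsigned long offset;
        int i;

        for (i = 0; i < HPRE_CLUSTERS_NUM_SKETCH; i++) {
                offset = HPRE_CLSTR_BASE_SKETCH +
                         i * HPRE_CLSTR_STRIDE_SKETCH;
                writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
        }
}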
344 static void hpre_hw_error_disable(struct hisi_qm *qm) in hpre_hw_error_disable() argument
349 writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK); in hpre_hw_error_disable()
352 val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); in hpre_hw_error_disable()
354 writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); in hpre_hw_error_disable()
357 static void hpre_hw_error_enable(struct hisi_qm *qm) in hpre_hw_error_enable() argument
362 writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT); in hpre_hw_error_enable()
365 writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK); in hpre_hw_error_enable()
366 writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB); in hpre_hw_error_enable()
367 writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB); in hpre_hw_error_enable()
368 writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB); in hpre_hw_error_enable()
371 val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); in hpre_hw_error_enable()
373 writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); in hpre_hw_error_enable()
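hpre_hw_error_disable() and hpre_hw_error_enable() are mirror images: the former masks every HPRE interrupt source and clears the out-of-order (OOO) shutdown bit; the latter clears stale sources, unmasks the interrupts, programs the three RAS severity enables (CE/NFE/FE), and sets the shutdown bit so an uncorrectable error also halts the AXI master. The shared read-modify-write on HPRE_AM_OOO_SHUTDOWN_ENB, sketched with an assumed name for the bit:

#define HPRE_AM_OOO_SHUTDOWN_ENABLE     BIT(0)  /* assumed bit position */

static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
        u32 val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);

        if (enable)
                val |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
        else
                val &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;

        writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}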
380 return &hpre->qm; in hpre_file_to_qm()
385 struct hisi_qm *qm = hpre_file_to_qm(file); in hpre_current_qm_read() local
387 return readl(qm->io_base + QM_DFX_MB_CNT_VF); in hpre_current_qm_read()
392 struct hisi_qm *qm = hpre_file_to_qm(file); in hpre_current_qm_write() local
393 u32 num_vfs = qm->vfs_num; in hpre_current_qm_write()
401 qm->debug.curr_qm_qp_num = qm->qp_num; in hpre_current_qm_write()
403 vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs; in hpre_current_qm_write()
405 qm->debug.curr_qm_qp_num = in hpre_current_qm_write()
406 qm->ctrl_qp_num - qm->qp_num - (num_vfs - 1) * vfq_num; in hpre_current_qm_write()
408 qm->debug.curr_qm_qp_num = vfq_num; in hpre_current_qm_write()
412 writel(val, qm->io_base + QM_DFX_MB_CNT_VF); in hpre_current_qm_write()
413 writel(val, qm->io_base + QM_DFX_DB_CNT_VF); in hpre_current_qm_write()
416 (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK); in hpre_current_qm_write()
417 writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); in hpre_current_qm_write()
420 (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK); in hpre_current_qm_write()
421 writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); in hpre_current_qm_write()
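The queue arithmetic in hpre_current_qm_write() splits whatever is left after the PF's allocation evenly across the VFs, with the last VF absorbing the remainder. For example, with ctrl_qp_num = 1024 total queue pairs, qp_num = 250 on the PF, and num_vfs = 5: vfq_num = (1024 - 250) / 5 = 154, and VF 5 gets 774 - 4 * 154 = 158. The selection logic, condensed from the lines above:

/*
 * Queue-pair count whose DFX counters should be displayed for
 * function 'val': 0 selects the PF, 1..num_vfs selects a VF.
 */
static u32 hpre_qp_num_for(struct hisi_qm *qm, u32 val)
{
        u32 num_vfs = qm->vfs_num;
        u32 vfq_num;

        if (val == 0)                   /* the PF itself */
                return qm->qp_num;

        vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
        if (val == num_vfs)             /* last VF takes the remainder */
                return qm->ctrl_qp_num - qm->qp_num -
                       (num_vfs - 1) * vfq_num;

        return vfq_num;
}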
428 struct hisi_qm *qm = hpre_file_to_qm(file); in hpre_clear_enable_read() local
430 return readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) & in hpre_clear_enable_read()
436 struct hisi_qm *qm = hpre_file_to_qm(file); in hpre_clear_enable_write() local
442 tmp = (readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) & in hpre_clear_enable_write()
444 writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE); in hpre_clear_enable_write()
451 struct hisi_qm *qm = hpre_file_to_qm(file); in hpre_cluster_inqry_read() local
456 return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT); in hpre_cluster_inqry_read()
461 struct hisi_qm *qm = hpre_file_to_qm(file); in hpre_cluster_inqry_write() local
466 writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY); in hpre_cluster_inqry_write()
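The cluster-inquiry pair behaves like a tiny mailbox: the write handler stores a command in HPRE_CLUSTER_INQURY for the selected cluster, and the read handler fetches the answer the hardware deposited in HPRE_CLSTR_ADDR_INQRY_RSLT. The round trip as one sketch, reusing the assumed cluster-offset constants from the counter-clear sketch above:

static u32 hpre_cluster_inquire(struct hisi_qm *qm, int cluster, u32 cmd)
{
        unsigned long offset = HPRE_CLSTR_BASE_SKETCH +
                               cluster * HPRE_CLSTR_STRIDE_SKETCH;

        writel(cmd, qm->io_base + offset + HPRE_CLUSTER_INQURY);
        /* The result register reflects the command written above. */
        return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);
}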
588 static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir, in hpre_create_debugfs_file() argument
591 struct hpre *hpre = container_of(qm, struct hpre, qm); in hpre_create_debugfs_file()
598 file_dir = qm->debug.debug_root; in hpre_create_debugfs_file()
613 static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm) in hpre_pf_comm_regs_debugfs_init() argument
615 struct device *dev = &qm->pdev->dev; in hpre_pf_comm_regs_debugfs_init()
624 regset->base = qm->io_base; in hpre_pf_comm_regs_debugfs_init()
626 debugfs_create_regset32("regs", 0444, qm->debug.debug_root, regset); in hpre_pf_comm_regs_debugfs_init()
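hpre_pf_comm_regs_debugfs_init() leans on the generic debugfs regset helper: describe each register with a struct debugfs_reg32, point regset->base at the mapped BAR, and debugfs_create_regset32() produces a readable "regs" file that dumps them all. A sketch with an invented two-entry table (the driver has its own register list):

#include <linux/debugfs.h>
#include <linux/device.h>

/* Illustrative table; names and offsets are made up for the sketch. */
static const struct debugfs_reg32 hpre_sketch_regs[] = {
        {"INT_STATUS", 0x0400},
        {"CORE_ENB",   0x0404},
};

static int hpre_regs_debugfs_init(struct hisi_qm *qm)
{
        struct device *dev = &qm->pdev->dev;
        struct debugfs_regset32 *regset;

        regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
        if (!regset)
                return -ENOMEM;

        regset->regs = hpre_sketch_regs;
        regset->nregs = ARRAY_SIZE(hpre_sketch_regs);
        regset->base = qm->io_base;

        debugfs_create_regset32("regs", 0444, qm->debug.debug_root, regset);

        return 0;
}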
630 static int hpre_cluster_debugfs_init(struct hisi_qm *qm) in hpre_cluster_debugfs_init() argument
632 struct device *dev = &qm->pdev->dev; in hpre_cluster_debugfs_init()
642 tmp_d = debugfs_create_dir(buf, qm->debug.debug_root); in hpre_cluster_debugfs_init()
650 regset->base = qm->io_base + hpre_cluster_offsets[i]; in hpre_cluster_debugfs_init()
653 ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL, in hpre_cluster_debugfs_init()
662 static int hpre_ctrl_debug_init(struct hisi_qm *qm) in hpre_ctrl_debug_init() argument
666 ret = hpre_create_debugfs_file(qm, NULL, HPRE_CURRENT_QM, in hpre_ctrl_debug_init()
671 ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE, in hpre_ctrl_debug_init()
676 ret = hpre_pf_comm_regs_debugfs_init(qm); in hpre_ctrl_debug_init()
680 return hpre_cluster_debugfs_init(qm); in hpre_ctrl_debug_init()
683 static void hpre_dfx_debug_init(struct hisi_qm *qm) in hpre_dfx_debug_init() argument
685 struct hpre *hpre = container_of(qm, struct hpre, qm); in hpre_dfx_debug_init()
690 parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root); in hpre_dfx_debug_init()
698 static int hpre_debugfs_init(struct hisi_qm *qm) in hpre_debugfs_init() argument
700 struct device *dev = &qm->pdev->dev; in hpre_debugfs_init()
703 qm->debug.debug_root = debugfs_create_dir(dev_name(dev), in hpre_debugfs_init()
706 qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET; in hpre_debugfs_init()
707 qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN; in hpre_debugfs_init()
708 ret = hisi_qm_debug_init(qm); in hpre_debugfs_init()
712 if (qm->pdev->device == HPRE_PCI_DEVICE_ID) { in hpre_debugfs_init()
713 ret = hpre_ctrl_debug_init(qm); in hpre_debugfs_init()
718 hpre_dfx_debug_init(qm); in hpre_debugfs_init()
723 debugfs_remove_recursive(qm->debug.debug_root); in hpre_debugfs_init()
727 static void hpre_debugfs_exit(struct hisi_qm *qm) in hpre_debugfs_exit() argument
729 debugfs_remove_recursive(qm->debug.debug_root); in hpre_debugfs_exit()
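hpre_debugfs_init() and hpre_debugfs_exit() bracket the device's debugfs lifetime: create a per-device directory, fill in the common QM files plus the PF-only control files, and on any failure (line 723) or at remove time (line 729) tear down the whole subtree with a single debugfs_remove_recursive(). The skeleton, assuming a module-level hpre_debugfs_root dentry as in comparable HiSilicon drivers:

static struct dentry *hpre_debugfs_root;        /* assumed module-level root */

static int hpre_debugfs_init_sketch(struct hisi_qm *qm)
{
        struct device *dev = &qm->pdev->dev;
        int ret;

        qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
                                                  hpre_debugfs_root);

        ret = hisi_qm_debug_init(qm);   /* common QM debug files */
        if (ret)
                goto failed_to_create;

        return 0;

failed_to_create:
        debugfs_remove_recursive(qm->debug.debug_root);
        return ret;
}

static void hpre_debugfs_exit_sketch(struct hisi_qm *qm)
{
        debugfs_remove_recursive(qm->debug.debug_root);
}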
732 static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) in hpre_qm_init() argument
739 qm->pdev = pdev; in hpre_qm_init()
740 qm->ver = pdev->revision; in hpre_qm_init()
741 qm->sqe_size = HPRE_SQE_SIZE; in hpre_qm_init()
742 qm->dev_name = hpre_name; in hpre_qm_init()
744 qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ? in hpre_qm_init()
746 if (qm->fun_type == QM_HW_PF) { in hpre_qm_init()
747 qm->qp_base = HPRE_PF_DEF_Q_BASE; in hpre_qm_init()
748 qm->qp_num = pf_q_num; in hpre_qm_init()
749 qm->debug.curr_qm_qp_num = pf_q_num; in hpre_qm_init()
750 qm->qm_list = &hpre_devices; in hpre_qm_init()
753 return hisi_qm_init(qm); in hpre_qm_init()
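hpre_qm_init() only fills in the hisi_qm descriptor and defers the real work to hisi_qm_init(); the PCI device ID decides whether this instance is the PF (which owns the queue allocation and the device list) or a VF. A condensed reconstruction of the lines above:

static int hpre_qm_init_sketch(struct hisi_qm *qm, struct pci_dev *pdev)
{
        qm->pdev = pdev;
        qm->ver = pdev->revision;
        qm->sqe_size = HPRE_SQE_SIZE;
        qm->dev_name = hpre_name;

        /* PF and VF share one probe path; the device ID tells them apart. */
        qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ?
                        QM_HW_PF : QM_HW_VF;
        if (qm->fun_type == QM_HW_PF) {
                qm->qp_base = HPRE_PF_DEF_Q_BASE;
                qm->qp_num = pf_q_num;          /* module parameter */
                qm->debug.curr_qm_qp_num = pf_q_num;
                qm->qm_list = &hpre_devices;
        }

        return hisi_qm_init(qm);
}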
756 static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts) in hpre_log_hw_error() argument
759 struct device *dev = &qm->pdev->dev; in hpre_log_hw_error()
769 static u32 hpre_get_hw_err_status(struct hisi_qm *qm) in hpre_get_hw_err_status() argument
771 return readl(qm->io_base + HPRE_HAC_INT_STATUS); in hpre_get_hw_err_status()
774 static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) in hpre_clear_hw_err_status() argument
776 writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT); in hpre_clear_hw_err_status()
779 static void hpre_open_axi_master_ooo(struct hisi_qm *qm) in hpre_open_axi_master_ooo() argument
783 value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); in hpre_open_axi_master_ooo()
785 HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB)); in hpre_open_axi_master_ooo()
787 HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB)); in hpre_open_axi_master_ooo()
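The error helpers above are not called directly by the driver; the hisi_qm core invokes them through the hpre_err_ini ops table that line 820 installs. A sketch of that wiring; the field names follow the hisi_qm_err_ini convention of this kernel era and should be treated as assumptions:

static const struct hisi_qm_err_ini hpre_err_ini = {
        .hw_init                 = hpre_set_user_domain_and_cache,
        .hw_err_enable           = hpre_hw_error_enable,
        .hw_err_disable          = hpre_hw_error_disable,
        .get_dev_hw_err_status   = hpre_get_hw_err_status,
        .clear_dev_hw_err_status = hpre_clear_hw_err_status,
        .log_dev_hw_err          = hpre_log_hw_error,
        .open_axi_master_ooo     = hpre_open_axi_master_ooo,
        /* .err_info = { CE/NFE/FE masks, reset bits, ... } */
};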
811 struct hisi_qm *qm = &hpre->qm; in hpre_pf_probe_init() local
814 qm->ctrl_qp_num = HPRE_QUEUE_NUM_V2; in hpre_pf_probe_init()
816 ret = hpre_set_user_domain_and_cache(qm); in hpre_pf_probe_init()
820 qm->err_ini = &hpre_err_ini; in hpre_pf_probe_init()
821 hisi_qm_dev_err_init(qm); in hpre_pf_probe_init()
828 struct hisi_qm *qm = &hpre->qm; in hpre_probe_init() local
831 if (qm->fun_type == QM_HW_PF) { in hpre_probe_init()
842 struct hisi_qm *qm; in hpre_probe() local
850 qm = &hpre->qm; in hpre_probe()
851 ret = hpre_qm_init(qm, pdev); in hpre_probe()
863 ret = hisi_qm_start(qm); in hpre_probe()
867 ret = hpre_debugfs_init(qm); in hpre_probe()
871 ret = hisi_qm_alg_register(qm, &hpre_devices); in hpre_probe()
877 if (qm->fun_type == QM_HW_PF && vfs_num) { in hpre_probe()
886 hisi_qm_alg_unregister(qm, &hpre_devices); in hpre_probe()
889 hpre_debugfs_exit(qm); in hpre_probe()
890 hisi_qm_stop(qm, QM_NORMAL); in hpre_probe()
893 hisi_qm_dev_err_uninit(qm); in hpre_probe()
896 hisi_qm_uninit(qm); in hpre_probe()
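hpre_probe() is a textbook goto-unwind chain: every init step that succeeds gains a matching cleanup label, and the labels (lines 886 to 896) run in reverse order when a later step fails. The skeleton, reconstructed from the lines above with assumed label names and simplified error handling:

static int hpre_probe_sketch(struct pci_dev *pdev, struct hisi_qm *qm)
{
        int ret;

        ret = hpre_qm_init(qm, pdev);
        if (ret)
                return ret;

        /* PF-only hardware setup (hpre_probe_init) elided */

        ret = hisi_qm_start(qm);
        if (ret)
                goto err_with_err_init;

        ret = hpre_debugfs_init(qm);
        if (ret)
                dev_warn(&pdev->dev, "debugfs init failed (%d)\n", ret);

        ret = hisi_qm_alg_register(qm, &hpre_devices);
        if (ret)
                goto err_with_qm_start;

        /*
         * Optional SR-IOV enable for the PF elided; on failure it would
         * first call hisi_qm_alg_unregister() (line 886), then fall
         * through the labels below.
         */

        return 0;

err_with_qm_start:
        hpre_debugfs_exit(qm);
        hisi_qm_stop(qm, QM_NORMAL);

err_with_err_init:
        hisi_qm_dev_err_uninit(qm);
        hisi_qm_uninit(qm);
        return ret;
}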
903 struct hisi_qm *qm = pci_get_drvdata(pdev); in hpre_remove() local
906 hisi_qm_wait_task_finish(qm, &hpre_devices); in hpre_remove()
907 hisi_qm_alg_unregister(qm, &hpre_devices); in hpre_remove()
908 if (qm->fun_type == QM_HW_PF && qm->vfs_num) { in hpre_remove()
909 ret = hisi_qm_sriov_disable(pdev, qm->is_frozen); in hpre_remove()
915 if (qm->fun_type == QM_HW_PF) { in hpre_remove()
916 hpre_cnt_regs_clear(qm); in hpre_remove()
917 qm->debug.curr_qm_qp_num = 0; in hpre_remove()
920 hpre_debugfs_exit(qm); in hpre_remove()
921 hisi_qm_stop(qm, QM_NORMAL); in hpre_remove()
922 hisi_qm_dev_err_uninit(qm); in hpre_remove()
923 hisi_qm_uninit(qm); in hpre_remove()
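hpre_remove() undoes probe in reverse, with two PF-only extras: SR-IOV is torn down before the queues stop, and the DFX counters are cleared so the next probe starts from a clean slate. Condensed from the lines above (the SR-IOV teardown's return-value handling is elided):

static void hpre_remove_sketch(struct pci_dev *pdev)
{
        struct hisi_qm *qm = pci_get_drvdata(pdev);

        hisi_qm_wait_task_finish(qm, &hpre_devices);    /* drain in-flight work */
        hisi_qm_alg_unregister(qm, &hpre_devices);

        if (qm->fun_type == QM_HW_PF && qm->vfs_num)
                hisi_qm_sriov_disable(pdev, qm->is_frozen);

        if (qm->fun_type == QM_HW_PF) {
                hpre_cnt_regs_clear(qm);                /* reset DFX counters */
                qm->debug.curr_qm_qp_num = 0;
        }

        hpre_debugfs_exit(qm);
        hisi_qm_stop(qm, QM_NORMAL);
        hisi_qm_dev_err_uninit(qm);
        hisi_qm_uninit(qm);
}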