
Lines matching the full word "qm" — HiSilicon HPRE accelerator driver (hpre_main.c)

360 bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)  in hpre_check_alg_support()  argument
364 cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP_CAP_IDX].cap_val; in hpre_check_alg_support()
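
Note: lines 360 and 364 are the whole of the capability check: it reads the algorithm bitmap cached in the device capability table and tests the requested bit. A minimal reconstruction, assuming the driver's usual headers; the lines elided by the match are inferred:

    bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
    {
            u32 cap_val;

            /* Bitmap of driver-supported algorithms, cached at probe time. */
            cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP_CAP_IDX].cap_val;

            /* One bit per algorithm: supported iff the requested bit is set. */
            return !!(alg & cap_val);
    }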
373 struct hisi_qm *qm = s->private; in hpre_diff_regs_show() local
375 hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs, in hpre_diff_regs_show()
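
Note: hpre_diff_regs_show() is a standard seq_file show callback; the two matched lines are essentially its whole body. A sketch, with the ARRAY_SIZE(hpre_diff_regs) length argument inferred from the hpre_debugfs_init() matches further down:

    static int hpre_diff_regs_show(struct seq_file *s, void *unused)
    {
            struct hisi_qm *qm = s->private;

            /* Dump the DFX registers that changed since the last snapshot. */
            hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
                                       ARRAY_SIZE(hpre_diff_regs));

            return 0;
    }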
460 static void hpre_config_pasid(struct hisi_qm *qm) in hpre_config_pasid() argument
464 if (qm->ver >= QM_HW_V3) in hpre_config_pasid()
467 val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG); in hpre_config_pasid()
468 val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG); in hpre_config_pasid()
469 if (qm->use_sva) { in hpre_config_pasid()
476 writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG); in hpre_config_pasid()
477 writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG); in hpre_config_pasid()
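
Note: the branch between the two reads (467-468) and the two writes (476-477) is elided by the match. It sets or clears the PASID enable bit in both the read-user and write-user AXI configuration registers, depending on whether SVA is in use. A plausible reconstruction; BIT(HPRE_PASID_EN_BIT) is an assumed field name:

    static void hpre_config_pasid(struct hisi_qm *qm)
    {
            u32 val1, val2;

            /* On HW V3 and later the QM handles PASID itself. */
            if (qm->ver >= QM_HW_V3)
                    return;

            val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
            val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
            if (qm->use_sva) {
                    val1 |= BIT(HPRE_PASID_EN_BIT);  /* assumed mask name */
                    val2 |= BIT(HPRE_PASID_EN_BIT);
            } else {
                    val1 &= ~BIT(HPRE_PASID_EN_BIT);
                    val2 &= ~BIT(HPRE_PASID_EN_BIT);
            }
            writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG);
            writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG);
    }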
480 static int hpre_cfg_by_dsm(struct hisi_qm *qm) in hpre_cfg_by_dsm() argument
482 struct device *dev = &qm->pdev->dev; in hpre_cfg_by_dsm()
504 static int hpre_set_cluster(struct hisi_qm *qm) in hpre_set_cluster() argument
506 struct device *dev = &qm->pdev->dev; in hpre_set_cluster()
513 cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_ENABLE_BITMAP_CAP_IDX].cap_val; in hpre_set_cluster()
514 clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val; in hpre_set_cluster()
520 qm->io_base + offset + HPRE_CORE_ENB); in hpre_set_cluster()
521 writel(0x1, qm->io_base + offset + HPRE_CORE_INI_CFG); in hpre_set_cluster()
522 ret = readl_relaxed_poll_timeout(qm->io_base + offset + in hpre_set_cluster()
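
Note: lines 513-522 show the core of hpre_set_cluster(): the enabled-core mask and the cluster count both come from the pre-stored capability table, and each cluster is brought up with a write-then-poll handshake. The loop likely looks like the sketch below; the offset stride, poll condition and error message are inferred, not confirmed by the matches:

    for (i = 0; i < clusters_num; i++) {
            offset = i * HPRE_CLSTR_ADDR_INTRVL;  /* assumed stride macro */

            /* Enable the cluster's cores, kick init, then wait for ready. */
            writel(cluster_core_mask,
                   qm->io_base + offset + HPRE_CORE_ENB);
            writel(0x1, qm->io_base + offset + HPRE_CORE_INI_CFG);
            ret = readl_relaxed_poll_timeout(qm->io_base + offset +
                                             HPRE_CORE_INI_ST, val,
                                             (val & cluster_core_mask) ==
                                             cluster_core_mask,
                                             HPRE_REG_RD_INTVRL_US,
                                             HPRE_REG_RD_TMOUT_US);
            if (ret) {
                    dev_err(dev, "cluster %u init not ready!\n", i);
                    return -ETIMEDOUT;
            }
    }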
543 static void disable_flr_of_bme(struct hisi_qm *qm) in disable_flr_of_bme() argument
547 val = readl(qm->io_base + QM_PEH_AXUSER_CFG); in disable_flr_of_bme()
550 writel(val, qm->io_base + QM_PEH_AXUSER_CFG); in disable_flr_of_bme()
551 writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE); in disable_flr_of_bme()
554 static void hpre_open_sva_prefetch(struct hisi_qm *qm) in hpre_open_sva_prefetch() argument
559 if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) in hpre_open_sva_prefetch()
563 val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG); in hpre_open_sva_prefetch()
565 writel(val, qm->io_base + HPRE_PREFETCH_CFG); in hpre_open_sva_prefetch()
567 ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG, in hpre_open_sva_prefetch()
572 pci_err(qm->pdev, "failed to open sva prefetch\n"); in hpre_open_sva_prefetch()
575 static void hpre_close_sva_prefetch(struct hisi_qm *qm) in hpre_close_sva_prefetch() argument
580 if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) in hpre_close_sva_prefetch()
583 val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG); in hpre_close_sva_prefetch()
585 writel(val, qm->io_base + HPRE_PREFETCH_CFG); in hpre_close_sva_prefetch()
587 ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX, in hpre_close_sva_prefetch()
592 pci_err(qm->pdev, "failed to close sva prefetch\n"); in hpre_close_sva_prefetch()
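
Note: open and close are mirror operations on the same prefetch engine. Open clears the disable bit in HPRE_PREFETCH_CFG and polls that same register until the bit drops; close sets the bit and polls the HPRE_SVA_PREFTCH_DFX status register instead. A sketch of the open path, with the HPRE_PREFETCH_ENABLE/HPRE_PREFETCH_DISABLE mask names assumed:

    static void hpre_open_sva_prefetch(struct hisi_qm *qm)
    {
            u32 val;
            int ret;

            if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
                    return;

            /* Clear the disable bit, then wait for HW to acknowledge. */
            val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
            val &= HPRE_PREFETCH_ENABLE;   /* assumed: mask clearing the disable bit */
            writel(val, qm->io_base + HPRE_PREFETCH_CFG);

            ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
                                             val, !(val & HPRE_PREFETCH_DISABLE),
                                             HPRE_REG_RD_INTVRL_US,
                                             HPRE_REG_RD_TMOUT_US);
            if (ret)
                    pci_err(qm->pdev, "failed to open sva prefetch\n");
    }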
595 static void hpre_enable_clock_gate(struct hisi_qm *qm) in hpre_enable_clock_gate() argument
599 if (qm->ver < QM_HW_V3) in hpre_enable_clock_gate()
602 val = readl(qm->io_base + HPRE_CLKGATE_CTL); in hpre_enable_clock_gate()
604 writel(val, qm->io_base + HPRE_CLKGATE_CTL); in hpre_enable_clock_gate()
606 val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE); in hpre_enable_clock_gate()
608 writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE); in hpre_enable_clock_gate()
610 val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL); in hpre_enable_clock_gate()
612 writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL); in hpre_enable_clock_gate()
614 val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG); in hpre_enable_clock_gate()
616 writel(val, qm->io_base + HPRE_CORE_SHB_CFG); in hpre_enable_clock_gate()
619 static void hpre_disable_clock_gate(struct hisi_qm *qm) in hpre_disable_clock_gate() argument
623 if (qm->ver < QM_HW_V3) in hpre_disable_clock_gate()
626 val = readl(qm->io_base + HPRE_CLKGATE_CTL); in hpre_disable_clock_gate()
628 writel(val, qm->io_base + HPRE_CLKGATE_CTL); in hpre_disable_clock_gate()
630 val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE); in hpre_disable_clock_gate()
632 writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE); in hpre_disable_clock_gate()
634 val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL); in hpre_disable_clock_gate()
636 writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL); in hpre_disable_clock_gate()
638 val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG); in hpre_disable_clock_gate()
640 writel(val, qm->io_base + HPRE_CORE_SHB_CFG); in hpre_disable_clock_gate()
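
Note: hpre_enable_clock_gate() and hpre_disable_clock_gate() are exact mirrors: both return early before HW V3, then read-modify-write the same four registers (HPRE_CLKGATE_CTL, HPRE_PEH_CFG_AUTO_GATE, HPRE_CLUSTER_DYN_CTL, HPRE_CORE_SHB_CFG), one setting the gating fields, the other clearing them. Purely as an illustration of the shared shape, not the driver's actual structure; the HPRE_CLKGATE_CTL_EN mask is hypothetical:

    static void hpre_set_clock_gate(struct hisi_qm *qm, bool enable)
    {
            u32 val;

            if (qm->ver < QM_HW_V3)
                    return;

            val = readl(qm->io_base + HPRE_CLKGATE_CTL);
            val = enable ? (val | HPRE_CLKGATE_CTL_EN)
                         : (val & ~HPRE_CLKGATE_CTL_EN);
            writel(val, qm->io_base + HPRE_CLKGATE_CTL);

            /* ...and likewise for HPRE_PEH_CFG_AUTO_GATE,
             * HPRE_CLUSTER_DYN_CTL and HPRE_CORE_SHB_CFG,
             * each with its own enable mask. */
    }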
643 static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) in hpre_set_user_domain_and_cache() argument
645 struct device *dev = &qm->pdev->dev; in hpre_set_user_domain_and_cache()
650 hpre_disable_clock_gate(qm); in hpre_set_user_domain_and_cache()
652 writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_ARUSER_M_CFG_ENABLE); in hpre_set_user_domain_and_cache()
653 writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE); in hpre_set_user_domain_and_cache()
654 writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG); in hpre_set_user_domain_and_cache()
657 val = readl_relaxed(qm->io_base + HPRE_QM_ABNML_INT_MASK); in hpre_set_user_domain_and_cache()
659 writel_relaxed(val, qm->io_base + HPRE_QM_ABNML_INT_MASK); in hpre_set_user_domain_and_cache()
661 if (qm->ver >= QM_HW_V3) in hpre_set_user_domain_and_cache()
663 qm->io_base + HPRE_TYPES_ENB); in hpre_set_user_domain_and_cache()
665 writel(HPRE_RSA_ENB, qm->io_base + HPRE_TYPES_ENB); in hpre_set_user_domain_and_cache()
667 writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE); in hpre_set_user_domain_and_cache()
668 writel(0x0, qm->io_base + HPRE_BD_ENDIAN); in hpre_set_user_domain_and_cache()
669 writel(0x0, qm->io_base + HPRE_INT_MASK); in hpre_set_user_domain_and_cache()
670 writel(0x0, qm->io_base + HPRE_POISON_BYPASS); in hpre_set_user_domain_and_cache()
671 writel(0x0, qm->io_base + HPRE_COMM_CNT_CLR_CE); in hpre_set_user_domain_and_cache()
672 writel(0x0, qm->io_base + HPRE_ECC_BYPASS); in hpre_set_user_domain_and_cache()
674 writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG); in hpre_set_user_domain_and_cache()
675 writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_AWUSR_CFG); in hpre_set_user_domain_and_cache()
676 writel(0x1, qm->io_base + HPRE_RDCHN_INI_CFG); in hpre_set_user_domain_and_cache()
677 ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_RDCHN_INI_ST, val, in hpre_set_user_domain_and_cache()
686 ret = hpre_set_cluster(qm); in hpre_set_user_domain_and_cache()
691 if (qm->ver == QM_HW_V2) { in hpre_set_user_domain_and_cache()
692 ret = hpre_cfg_by_dsm(qm); in hpre_set_user_domain_and_cache()
696 disable_flr_of_bme(qm); in hpre_set_user_domain_and_cache()
700 hpre_config_pasid(qm); in hpre_set_user_domain_and_cache()
702 hpre_enable_clock_gate(qm); in hpre_set_user_domain_and_cache()
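
Note: the one blocking step in hpre_set_user_domain_and_cache() is the read-channel bring-up at lines 676-677: write 1 to HPRE_RDCHN_INI_CFG, then poll HPRE_RDCHN_INI_ST until the ready bit latches. Roughly as follows; the ready-bit position and error text are assumptions:

    writel(0x1, qm->io_base + HPRE_RDCHN_INI_CFG);
    ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_RDCHN_INI_ST, val,
                                     val & BIT(0),  /* assumed ready bit */
                                     HPRE_REG_RD_INTVRL_US,
                                     HPRE_REG_RD_TMOUT_US);
    if (ret) {
            dev_err(dev, "read channel init timeout!\n");
            return -ETIMEDOUT;
    }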
707 static void hpre_cnt_regs_clear(struct hisi_qm *qm) in hpre_cnt_regs_clear() argument
714 clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val; in hpre_cnt_regs_clear()
717 writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY); in hpre_cnt_regs_clear()
721 writel(0x0, qm->io_base + HPRE_CTRL_CNT_CLR_CE); in hpre_cnt_regs_clear()
723 hisi_qm_debug_regs_clear(qm); in hpre_cnt_regs_clear()
726 static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable) in hpre_master_ooo_ctrl() argument
730 val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); in hpre_master_ooo_ctrl()
733 val2 = hisi_qm_get_hw_info(qm, hpre_basic_info, in hpre_master_ooo_ctrl()
734 HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); in hpre_master_ooo_ctrl()
740 if (qm->ver > QM_HW_V2) in hpre_master_ooo_ctrl()
741 writel(val2, qm->io_base + HPRE_OOO_SHUTDOWN_SEL); in hpre_master_ooo_ctrl()
743 writel(val1, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); in hpre_master_ooo_ctrl()
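
Note: hpre_master_ooo_ctrl() toggles whether a master-module error shuts down the AXI out-of-order path; the enable path additionally programs the shutdown mask from the capability table on HW newer than V2. The elided middle presumably looks like this, with the HPRE_AM_OOO_SHUTDOWN_ENABLE mask name assumed:

    static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
    {
            u32 val1, val2;

            val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
            if (enable) {
                    val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE;  /* assumed mask */
                    val2 = hisi_qm_get_hw_info(qm, hpre_basic_info,
                                               HPRE_OOO_SHUTDOWN_MASK_CAP,
                                               qm->cap_ver);
            } else {
                    val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
                    val2 = 0x0;
            }

            if (qm->ver > QM_HW_V2)
                    writel(val2, qm->io_base + HPRE_OOO_SHUTDOWN_SEL);

            writel(val1, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
    }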
746 static void hpre_hw_error_disable(struct hisi_qm *qm) in hpre_hw_error_disable() argument
750 ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver); in hpre_hw_error_disable()
751 nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); in hpre_hw_error_disable()
754 writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK); in hpre_hw_error_disable()
756 hpre_master_ooo_ctrl(qm, false); in hpre_hw_error_disable()
759 static void hpre_hw_error_enable(struct hisi_qm *qm) in hpre_hw_error_enable() argument
763 ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver); in hpre_hw_error_enable()
764 nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); in hpre_hw_error_enable()
767 writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT); in hpre_hw_error_enable()
770 writel(ce, qm->io_base + HPRE_RAS_CE_ENB); in hpre_hw_error_enable()
771 writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB); in hpre_hw_error_enable()
772 writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB); in hpre_hw_error_enable()
775 hpre_master_ooo_ctrl(qm, true); in hpre_hw_error_enable()
778 writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK); in hpre_hw_error_enable()
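
Note: disable is the simpler half, and its whole body is visible in the matches above: fetch the CE and NFE masks for this HW revision, mask every RAS source, then turn the master-OOO shutdown path off. Reassembled:

    static void hpre_hw_error_disable(struct hisi_qm *qm)
    {
            u32 ce, nfe;

            ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
            nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);

            /* Mask all RAS sources (CE, NFE and FE alike)... */
            writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK);
            /* ...and stop master-module errors from shutting down AXI. */
            hpre_master_ooo_ctrl(qm, false);
    }

The enable path (lines 763-778) runs the sequence the other way around: clear stale sources first, program the per-class RAS enables, arm the OOO shutdown, and only then unmask the interrupt.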
785 return &hpre->qm; in hpre_file_to_qm()
790 struct hisi_qm *qm = hpre_file_to_qm(file); in hpre_clear_enable_read() local
792 return readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) & in hpre_clear_enable_read()
798 struct hisi_qm *qm = hpre_file_to_qm(file); in hpre_clear_enable_write() local
804 tmp = (readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) & in hpre_clear_enable_write()
806 writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE); in hpre_clear_enable_write()
813 struct hisi_qm *qm = hpre_file_to_qm(file); in hpre_cluster_inqry_read() local
818 return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT); in hpre_cluster_inqry_read()
823 struct hisi_qm *qm = hpre_file_to_qm(file); in hpre_cluster_inqry_write() local
828 writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY); in hpre_cluster_inqry_write()
835 struct hisi_qm *qm = hpre_file_to_qm(file); in hpre_ctrl_debug_read() local
840 ret = hisi_qm_get_dfx_access(qm); in hpre_ctrl_debug_read()
857 hisi_qm_put_dfx_access(qm); in hpre_ctrl_debug_read()
863 hisi_qm_put_dfx_access(qm); in hpre_ctrl_debug_read()
871 struct hisi_qm *qm = hpre_file_to_qm(file); in hpre_ctrl_debug_write() local
891 ret = hisi_qm_get_dfx_access(qm); in hpre_ctrl_debug_write()
914 hisi_qm_put_dfx_access(qm); in hpre_ctrl_debug_write()
954 static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir, in hpre_create_debugfs_file() argument
957 struct hpre *hpre = container_of(qm, struct hpre, qm); in hpre_create_debugfs_file()
964 file_dir = qm->debug.debug_root; in hpre_create_debugfs_file()
979 static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm) in hpre_pf_comm_regs_debugfs_init() argument
981 struct device *dev = &qm->pdev->dev; in hpre_pf_comm_regs_debugfs_init()
990 regset->base = qm->io_base; in hpre_pf_comm_regs_debugfs_init()
993 debugfs_create_file("regs", 0444, qm->debug.debug_root, in hpre_pf_comm_regs_debugfs_init()
999 static int hpre_cluster_debugfs_init(struct hisi_qm *qm) in hpre_cluster_debugfs_init() argument
1001 struct device *dev = &qm->pdev->dev; in hpre_cluster_debugfs_init()
1008 clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val; in hpre_cluster_debugfs_init()
1013 tmp_d = debugfs_create_dir(buf, qm->debug.debug_root); in hpre_cluster_debugfs_init()
1021 regset->base = qm->io_base + hpre_cluster_offsets[i]; in hpre_cluster_debugfs_init()
1026 ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL, in hpre_cluster_debugfs_init()
1035 static int hpre_ctrl_debug_init(struct hisi_qm *qm) in hpre_ctrl_debug_init() argument
1039 ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE, in hpre_ctrl_debug_init()
1044 ret = hpre_pf_comm_regs_debugfs_init(qm); in hpre_ctrl_debug_init()
1048 return hpre_cluster_debugfs_init(qm); in hpre_ctrl_debug_init()
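
Note: hpre_ctrl_debug_init() is a plain three-step chain, visible in the matches: the clear-enable control file, the PF common-register dump, then the per-cluster directories. Reassembled; the fourth argument of the first call is cut off by the match and assumed here:

    static int hpre_ctrl_debug_init(struct hisi_qm *qm)
    {
            int ret;

            ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE,
                                           HPRE_CLEAR_ENABLE);  /* index arg assumed */
            if (ret)
                    return ret;

            ret = hpre_pf_comm_regs_debugfs_init(qm);
            if (ret)
                    return ret;

            return hpre_cluster_debugfs_init(qm);
    }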
1051 static void hpre_dfx_debug_init(struct hisi_qm *qm) in hpre_dfx_debug_init() argument
1053 struct dfx_diff_registers *hpre_regs = qm->debug.acc_diff_regs; in hpre_dfx_debug_init()
1054 struct hpre *hpre = container_of(qm, struct hpre, qm); in hpre_dfx_debug_init()
1059 parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root); in hpre_dfx_debug_init()
1066 if (qm->fun_type == QM_HW_PF && hpre_regs) in hpre_dfx_debug_init()
1068 qm, &hpre_diff_regs_fops); in hpre_dfx_debug_init()
1071 static int hpre_debugfs_init(struct hisi_qm *qm) in hpre_debugfs_init() argument
1073 struct device *dev = &qm->pdev->dev; in hpre_debugfs_init()
1076 qm->debug.debug_root = debugfs_create_dir(dev_name(dev), in hpre_debugfs_init()
1079 qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET; in hpre_debugfs_init()
1080 qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN; in hpre_debugfs_init()
1081 ret = hisi_qm_regs_debugfs_init(qm, hpre_diff_regs, ARRAY_SIZE(hpre_diff_regs)); in hpre_debugfs_init()
1087 hisi_qm_debug_init(qm); in hpre_debugfs_init()
1089 if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) { in hpre_debugfs_init()
1090 ret = hpre_ctrl_debug_init(qm); in hpre_debugfs_init()
1095 hpre_dfx_debug_init(qm); in hpre_debugfs_init()
1100 hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs)); in hpre_debugfs_init()
1102 debugfs_remove_recursive(qm->debug.debug_root); in hpre_debugfs_init()
1106 static void hpre_debugfs_exit(struct hisi_qm *qm) in hpre_debugfs_exit() argument
1108 hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs)); in hpre_debugfs_exit()
1110 debugfs_remove_recursive(qm->debug.debug_root); in hpre_debugfs_exit()
1113 static int hpre_pre_store_cap_reg(struct hisi_qm *qm) in hpre_pre_store_cap_reg() argument
1116 struct device *dev = &qm->pdev->dev; in hpre_pre_store_cap_reg()
1126 hpre_cap[i].cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info, in hpre_pre_store_cap_reg()
1127 hpre_pre_store_caps[i], qm->cap_ver); in hpre_pre_store_cap_reg()
1136 qm->cap_tables.dev_cap_table = hpre_cap; in hpre_pre_store_cap_reg()
1141 static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) in hpre_qm_init() argument
1151 qm->mode = uacce_mode; in hpre_qm_init()
1152 qm->pdev = pdev; in hpre_qm_init()
1153 qm->ver = pdev->revision; in hpre_qm_init()
1154 qm->sqe_size = HPRE_SQE_SIZE; in hpre_qm_init()
1155 qm->dev_name = hpre_name; in hpre_qm_init()
1157 qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) ? in hpre_qm_init()
1159 if (qm->fun_type == QM_HW_PF) { in hpre_qm_init()
1160 qm->qp_base = HPRE_PF_DEF_Q_BASE; in hpre_qm_init()
1161 qm->qp_num = pf_q_num; in hpre_qm_init()
1162 qm->debug.curr_qm_qp_num = pf_q_num; in hpre_qm_init()
1163 qm->qm_list = &hpre_devices; in hpre_qm_init()
1165 set_bit(QM_MODULE_PARAM, &qm->misc_ctl); in hpre_qm_init()
1168 ret = hisi_qm_init(qm); in hpre_qm_init()
1170 pci_err(pdev, "Failed to init hpre qm configures!\n"); in hpre_qm_init()
1175 ret = hpre_pre_store_cap_reg(qm); in hpre_qm_init()
1178 hisi_qm_uninit(qm); in hpre_qm_init()
1182 alg_msk = qm->cap_tables.dev_cap_table[HPRE_DEV_ALG_BITMAP_CAP_IDX].cap_val; in hpre_qm_init()
1183 ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs)); in hpre_qm_init()
1186 hisi_qm_uninit(qm); in hpre_qm_init()
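
Note: lines 1168-1186 give the tail of hpre_qm_init(): once hisi_qm_init() succeeds, the capability registers are cached and the algorithm set is registered, and either failure unwinds with hisi_qm_uninit(). Reassembled; the first error string is verbatim from line 1170, the other two are cut off by the matches and paraphrased:

    ret = hisi_qm_init(qm);
    if (ret) {
            pci_err(pdev, "Failed to init hpre qm configures!\n");
            return ret;
    }

    /* Cache the capability registers so later paths can read them cheaply. */
    ret = hpre_pre_store_cap_reg(qm);
    if (ret) {
            pci_err(pdev, "Failed to pre-store capability registers!\n");
            hisi_qm_uninit(qm);
            return ret;
    }

    alg_msk = qm->cap_tables.dev_cap_table[HPRE_DEV_ALG_BITMAP_CAP_IDX].cap_val;
    ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs));
    if (ret) {
            pci_err(pdev, "Failed to set hpre algs!\n");
            hisi_qm_uninit(qm);
    }

    return ret;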
1192 static int hpre_show_last_regs_init(struct hisi_qm *qm) in hpre_show_last_regs_init() argument
1196 struct qm_debug *debug = &qm->debug; in hpre_show_last_regs_init()
1201 clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val; in hpre_show_last_regs_init()
1208 debug->last_words[i] = readl_relaxed(qm->io_base + in hpre_show_last_regs_init()
1212 io_base = qm->io_base + hpre_cluster_offsets[i]; in hpre_show_last_regs_init()
1223 static void hpre_show_last_regs_uninit(struct hisi_qm *qm) in hpre_show_last_regs_uninit() argument
1225 struct qm_debug *debug = &qm->debug; in hpre_show_last_regs_uninit()
1227 if (qm->fun_type == QM_HW_VF || !debug->last_words) in hpre_show_last_regs_uninit()
1234 static void hpre_show_last_dfx_regs(struct hisi_qm *qm) in hpre_show_last_dfx_regs() argument
1238 struct qm_debug *debug = &qm->debug; in hpre_show_last_dfx_regs()
1239 struct pci_dev *pdev = qm->pdev; in hpre_show_last_dfx_regs()
1245 if (qm->fun_type == QM_HW_VF || !debug->last_words) in hpre_show_last_dfx_regs()
1250 val = readl_relaxed(qm->io_base + hpre_com_dfx_regs[i].offset); in hpre_show_last_dfx_regs()
1256 clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val; in hpre_show_last_dfx_regs()
1258 io_base = qm->io_base + hpre_cluster_offsets[i]; in hpre_show_last_dfx_regs()
1270 static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts) in hpre_log_hw_error() argument
1273 struct device *dev = &qm->pdev->dev; in hpre_log_hw_error()
1283 static u32 hpre_get_hw_err_status(struct hisi_qm *qm) in hpre_get_hw_err_status() argument
1285 return readl(qm->io_base + HPRE_INT_STATUS); in hpre_get_hw_err_status()
1288 static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) in hpre_clear_hw_err_status() argument
1292 writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT); in hpre_clear_hw_err_status()
1293 nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); in hpre_clear_hw_err_status()
1294 writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB); in hpre_clear_hw_err_status()
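
Note: hpre_clear_hw_err_status() is fully visible above and shows the two-step acknowledge idiom: HPRE_HAC_SOURCE_INT is write-1-to-clear, and the NFE enable mask is then rewritten so non-fatal error reporting is fully re-armed. Reassembled:

    static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
    {
            u32 nfe;

            /* W1C: writing the latched status bits back clears them. */
            writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);

            /* Restore the full NFE enable mask for this HW revision. */
            nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
            writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
    }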
1297 static void hpre_open_axi_master_ooo(struct hisi_qm *qm) in hpre_open_axi_master_ooo() argument
1301 value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); in hpre_open_axi_master_ooo()
1303 qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); in hpre_open_axi_master_ooo()
1305 qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); in hpre_open_axi_master_ooo()
1308 static void hpre_err_info_init(struct hisi_qm *qm) in hpre_err_info_init() argument
1310 struct hisi_qm_err_info *err_info = &qm->err_info; in hpre_err_info_init()
1313 err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver); in hpre_err_info_init()
1314 err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver); in hpre_err_info_init()
1316 err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, in hpre_err_info_init()
1317 HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); in hpre_err_info_init()
1318 err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, in hpre_err_info_init()
1319 HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); in hpre_err_info_init()
1320 err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, in hpre_err_info_init()
1321 HPRE_QM_RESET_MASK_CAP, qm->cap_ver); in hpre_err_info_init()
1322 err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, in hpre_err_info_init()
1323 HPRE_RESET_MASK_CAP, qm->cap_ver); in hpre_err_info_init()
1344 struct hisi_qm *qm = &hpre->qm; in hpre_pf_probe_init() local
1347 ret = hpre_set_user_domain_and_cache(qm); in hpre_pf_probe_init()
1351 hpre_open_sva_prefetch(qm); in hpre_pf_probe_init()
1353 qm->err_ini = &hpre_err_ini; in hpre_pf_probe_init()
1354 qm->err_ini->err_info_init(qm); in hpre_pf_probe_init()
1355 hisi_qm_dev_err_init(qm); in hpre_pf_probe_init()
1356 ret = hpre_show_last_regs_init(qm); in hpre_pf_probe_init()
1358 pci_err(qm->pdev, "Failed to init last word regs!\n"); in hpre_pf_probe_init()
1366 struct hisi_qm *qm = &hpre->qm; in hpre_probe_init() local
1369 if (qm->fun_type == QM_HW_PF) { in hpre_probe_init()
1374 if (qm->ver >= QM_HW_V3) { in hpre_probe_init()
1376 qm->type_rate = type_rate; in hpre_probe_init()
1385 struct hisi_qm *qm; in hpre_probe() local
1393 qm = &hpre->qm; in hpre_probe()
1394 ret = hpre_qm_init(qm, pdev); in hpre_probe()
1396 pci_err(pdev, "Failed to init HPRE QM (%d)!\n", ret); in hpre_probe()
1406 ret = hisi_qm_start(qm); in hpre_probe()
1410 ret = hpre_debugfs_init(qm); in hpre_probe()
1414 ret = hisi_qm_alg_register(qm, &hpre_devices); in hpre_probe()
1420 if (qm->uacce) { in hpre_probe()
1421 ret = uacce_register(qm->uacce); in hpre_probe()
1428 if (qm->fun_type == QM_HW_PF && vfs_num) { in hpre_probe()
1434 hisi_qm_pm_init(qm); in hpre_probe()
1439 hisi_qm_alg_unregister(qm, &hpre_devices); in hpre_probe()
1442 hpre_debugfs_exit(qm); in hpre_probe()
1443 hisi_qm_stop(qm, QM_NORMAL); in hpre_probe()
1446 hpre_show_last_regs_uninit(qm); in hpre_probe()
1447 hisi_qm_dev_err_uninit(qm); in hpre_probe()
1450 hisi_qm_uninit(qm); in hpre_probe()
1457 struct hisi_qm *qm = pci_get_drvdata(pdev); in hpre_remove() local
1459 hisi_qm_pm_uninit(qm); in hpre_remove()
1460 hisi_qm_wait_task_finish(qm, &hpre_devices); in hpre_remove()
1461 hisi_qm_alg_unregister(qm, &hpre_devices); in hpre_remove()
1462 if (qm->fun_type == QM_HW_PF && qm->vfs_num) in hpre_remove()
1465 hpre_debugfs_exit(qm); in hpre_remove()
1466 hisi_qm_stop(qm, QM_NORMAL); in hpre_remove()
1468 if (qm->fun_type == QM_HW_PF) { in hpre_remove()
1469 hpre_cnt_regs_clear(qm); in hpre_remove()
1470 qm->debug.curr_qm_qp_num = 0; in hpre_remove()
1471 hpre_show_last_regs_uninit(qm); in hpre_remove()
1472 hisi_qm_dev_err_uninit(qm); in hpre_remove()
1475 hisi_qm_uninit(qm); in hpre_remove()
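
Note: the remove path above tears down in reverse order of probe, with a PF-only block in the middle. That block, reassembled from lines 1468-1472:

    /* PF-only teardown: zero the debug counters and queue bookkeeping,
     * release the last-words snapshot, then disarm device error reporting. */
    if (qm->fun_type == QM_HW_PF) {
            hpre_cnt_regs_clear(qm);
            qm->debug.curr_qm_qp_num = 0;
            hpre_show_last_regs_uninit(qm);
            hisi_qm_dev_err_uninit(qm);
    }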