References to the identifier hr_dev under /kernel/linux/linux-5.10/drivers/infiniband/hw/hns/
D | hns_roce_main.c
    56  int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)  in hns_get_gid_index() argument
    58      return gid_index * hr_dev->caps.num_ports + port;  in hns_get_gid_index()
    61  static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)  in hns_roce_set_mac() argument
    66      if (!memcmp(hr_dev->dev_addr[port], addr, ETH_ALEN))  in hns_roce_set_mac()
    70          hr_dev->dev_addr[port][i] = addr[i];  in hns_roce_set_mac()
    72      phy_port = hr_dev->iboe.phy_port[port];  in hns_roce_set_mac()
    73      return hr_dev->hw->set_mac(hr_dev, phy_port, addr);  in hns_roce_set_mac()
    78      struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);  in hns_roce_add_gid() local
    82      if (port >= hr_dev->caps.num_ports)  in hns_roce_add_gid()
    85      ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr);  in hns_roce_add_gid()
    [all …]
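The hit at line 58 is the whole GID table layout: slots for a given table index are striped across ports. A minimal userspace sketch of that arithmetic (standalone; NUM_PORTS and all names here are illustrative, not the driver's):

    /*
     * Sketch of the GID slot calculation in hns_get_gid_index():
     * slot = gid_index * num_ports + port.
     */
    #include <stdio.h>

    #define NUM_PORTS 2  /* assumption: a 2-port device */

    static int gid_table_index(int gid_index, int port)
    {
        /* same formula as the driver's hns_get_gid_index() */
        return gid_index * NUM_PORTS + port;
    }

    int main(void)
    {
        for (int gid = 0; gid < 3; gid++)
            for (int port = 0; port < NUM_PORTS; port++)
                printf("gid %d port %d -> slot %d\n",
                       gid, port, gid_table_index(gid, port));
        return 0;
    }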
D | hns_roce_cmd.c
    43  static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,  in hns_roce_cmd_mbox_post_hw() argument
    48      struct hns_roce_cmdq *cmd = &hr_dev->cmd;  in hns_roce_cmd_mbox_post_hw()
    52      ret = hr_dev->hw->post_mbox(hr_dev, in_param, out_param, in_modifier,  in hns_roce_cmd_mbox_post_hw()
    60  static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,  in __hns_roce_cmd_mbox_poll() argument
    65      struct device *dev = hr_dev->dev;  in __hns_roce_cmd_mbox_poll()
    68      ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,  in __hns_roce_cmd_mbox_poll()
    76      return hr_dev->hw->chk_mbox(hr_dev, timeout);  in __hns_roce_cmd_mbox_poll()
    79  static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,  in hns_roce_cmd_mbox_poll() argument
    85      down(&hr_dev->cmd.poll_sem);  in hns_roce_cmd_mbox_poll()
    86      ret = __hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param, in_modifier,  in hns_roce_cmd_mbox_poll()
    [all …]
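These hits trace the poll-mode mailbox path: callers serialize on cmd.poll_sem, the command is posted through the hw->post_mbox hook, and completion is busy-waited via hw->chk_mbox. A reduced userspace sketch of that sequence, with stub hardware ops standing in for the real hooks (stub names and signatures are assumptions, not the driver's):

    #include <stdio.h>

    struct hw_ops {
        int (*post_mbox)(unsigned long in_param, unsigned long timeout);
        int (*chk_mbox)(unsigned long timeout);
    };

    static int fake_post_mbox(unsigned long in_param, unsigned long timeout)
    {
        printf("post mbox: in_param=%#lx timeout=%lu\n", in_param, timeout);
        return 0;
    }

    static int fake_chk_mbox(unsigned long timeout)
    {
        printf("poll for completion (up to %lu ms)\n", timeout);
        return 0; /* pretend hardware completed in time */
    }

    /* mirrors __hns_roce_cmd_mbox_poll(): post, then poll for completion */
    static int cmd_mbox_poll(const struct hw_ops *hw, unsigned long in_param,
                             unsigned long timeout)
    {
        int ret = hw->post_mbox(in_param, timeout);
        if (ret)
            return ret;
        return hw->chk_mbox(timeout);
    }

    int main(void)
    {
        const struct hw_ops hw = { fake_post_mbox, fake_chk_mbox };
        return cmd_mbox_poll(&hw, 0x1000, 100);
    }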
D | hns_roce_cq.c
    42  static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)  in alloc_cqc() argument
    46      struct ib_device *ibdev = &hr_dev->ib_dev;  in alloc_cqc()
    51      ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts),  in alloc_cqc()
    58      cq_table = &hr_dev->cq_table;  in alloc_cqc()
    66      ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);  in alloc_cqc()
    80      mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);  in alloc_cqc()
    86      hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);  in alloc_cqc()
    89      ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 0,  in alloc_cqc()
    91      hns_roce_free_cmd_mailbox(hr_dev, mailbox);  in alloc_cqc()
   111      hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);  in alloc_cqc()
    [all …]
D | hns_roce_pd.c
    38  static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)  in hns_roce_pd_alloc() argument
    40      return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn) ? -ENOMEM : 0;  in hns_roce_pd_alloc()
    43  static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)  in hns_roce_pd_free() argument
    45      hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn, BITMAP_NO_RR);  in hns_roce_pd_free()
    48  int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)  in hns_roce_init_pd_table() argument
    50      return hns_roce_bitmap_init(&hr_dev->pd_bitmap, hr_dev->caps.num_pds,  in hns_roce_init_pd_table()
    51                                  hr_dev->caps.num_pds - 1,  in hns_roce_init_pd_table()
    52                                  hr_dev->caps.reserved_pds, 0);  in hns_roce_init_pd_table()
    55  void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev)  in hns_roce_cleanup_pd_table() argument
    57      hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap);  in hns_roce_cleanup_pd_table()
    [all …]
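PD numbers come from a plain bitmap allocator: num_pds slots with the bottom reserved_pds held back, and -ENOMEM once the table is full. A simplified userspace sketch of that contract (not hns_roce_bitmap_alloc() itself; the sizes are assumed demo values):

    #include <errno.h>
    #include <stdio.h>

    #define NUM_PDS      16   /* assumed small table for the demo */
    #define RESERVED_PDS 2    /* bottom slots never handed out */

    static unsigned int pd_used[NUM_PDS];

    static int pd_alloc(unsigned long *pdn)
    {
        for (unsigned long i = RESERVED_PDS; i < NUM_PDS; i++) {
            if (!pd_used[i]) {
                pd_used[i] = 1;
                *pdn = i;
                return 0;
            }
        }
        return -ENOMEM;   /* same failure the driver maps at line 40 */
    }

    static void pd_free(unsigned long pdn)
    {
        pd_used[pdn] = 0;
    }

    int main(void)
    {
        unsigned long pdn;

        if (!pd_alloc(&pdn))
            printf("allocated PDN %lu\n", pdn); /* first non-reserved slot */
        pd_free(pdn);
        return 0;
    }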
D | hns_roce_qp.c
    50      struct device *dev = flush_work->hr_dev->dev;  in flush_work_handle()
    73  void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)  in init_flush_work() argument
    77      flush_work->hr_dev = hr_dev;  in init_flush_work()
    80      queue_work(hr_dev->irq_workq, &flush_work->work);  in init_flush_work()
    83  void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)  in hns_roce_qp_event() argument
    85      struct device *dev = hr_dev->dev;  in hns_roce_qp_event()
    88      xa_lock(&hr_dev->qp_table_xa);  in hns_roce_qp_event()
    89      qp = __hns_roce_qp_lookup(hr_dev, qpn);  in hns_roce_qp_event()
    92      xa_unlock(&hr_dev->qp_table_xa);  in hns_roce_qp_event()
    99      if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&  in hns_roce_qp_event()
    [all …]
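hns_roce_qp_event() resolves the QPN under the table lock (lines 88-92) and pins the QP with a reference before unlocking, so it cannot be freed while the async event is still being handled. A reduced sketch of that lookup-and-pin pattern (the xarray flattened to a plain array, the lock and refcount_t simplified for userspace; all names illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct qp {
        unsigned int qpn;
        int refcount;           /* stands in for the kernel refcount_t */
    };

    #define QP_TABLE_SIZE 64
    static struct qp *qp_table[QP_TABLE_SIZE];

    static struct qp *qp_lookup_get(unsigned int qpn)
    {
        /* xa_lock(qp_table_xa) would go here */
        struct qp *qp = qp_table[qpn % QP_TABLE_SIZE];

        if (qp)
            qp->refcount++;     /* pin before the lock is dropped */
        /* xa_unlock(qp_table_xa) */
        return qp;
    }

    static void qp_put(struct qp *qp)
    {
        if (--qp->refcount == 0)
            free(qp);
    }

    int main(void)
    {
        struct qp *qp = calloc(1, sizeof(*qp));

        qp->qpn = 7;
        qp->refcount = 1;               /* the table's reference */
        qp_table[qp->qpn] = qp;

        struct qp *ev_qp = qp_lookup_get(7);
        if (ev_qp) {
            printf("event for QPN %u\n", ev_qp->qpn);
            qp_put(ev_qp);              /* drop the event's reference */
        }
        qp_table[qp->qpn] = NULL;
        qp_put(qp);                     /* drop the table's reference */
        return 0;
    }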
D | hns_roce_mr.c
    51  static int hns_roce_hw_create_mpt(struct hns_roce_dev *hr_dev,  in hns_roce_hw_create_mpt() argument
    55      return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,  in hns_roce_hw_create_mpt()
    60  int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,  in hns_roce_hw_destroy_mpt() argument
    64      return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,  in hns_roce_hw_destroy_mpt()
    69  static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,  in alloc_mr_key() argument
    72      struct ib_device *ibdev = &hr_dev->ib_dev;  in alloc_mr_key()
    77      err = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &obj);  in alloc_mr_key()
    92      err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table, obj);  in alloc_mr_key()
   100      hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, obj, BITMAP_NO_RR);  in alloc_mr_key()
   104  static void free_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)  in free_mr_key() argument
    [all …]
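alloc_mr_key() pairs two allocations, an MTPT bitmap slot (line 77) and a HEM table entry (line 92), and unwinds the first if the second fails (line 100). A sketch of that goto-unwind shape with stub allocators (everything below is illustrative):

    #include <errno.h>
    #include <stdio.h>

    static int bitmap_alloc(unsigned long *obj) { *obj = 5; return 0; }
    static void bitmap_free(unsigned long obj) { printf("freed obj %lu\n", obj); }
    static int table_get(unsigned long obj)    { return -ENOMEM; /* force unwind */ }

    static int alloc_key(unsigned long *obj)
    {
        int err = bitmap_alloc(obj);
        if (err)
            return err;

        err = table_get(*obj);
        if (err)
            goto err_free_bitmap;   /* same unwind shape as the driver */

        return 0;

    err_free_bitmap:
        bitmap_free(*obj);
        return err;
    }

    int main(void)
    {
        unsigned long obj;

        printf("alloc_key -> %d\n", alloc_key(&obj));
        return 0;
    }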
D | hns_roce_srq.c
    12  void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)  in hns_roce_srq_event() argument
    14      struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;  in hns_roce_srq_event()
    18      srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1));  in hns_roce_srq_event()
    24      dev_warn(hr_dev->dev, "Async event for bogus SRQ %08x\n", srqn);  in hns_roce_srq_event()
    37      struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);  in hns_roce_ib_srq_event() local
    52      dev_err(hr_dev->dev,  in hns_roce_ib_srq_event()
    80  static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,  in alloc_srqc() argument
    83      struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;  in alloc_srqc()
    84      struct ib_device *ibdev = &hr_dev->ib_dev;  in alloc_srqc()
    93      ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,  in alloc_srqc()
    [all …]
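The lookup at line 18 masks the SRQN with num_srqs - 1 before indexing the table, which relies on num_srqs being a power of two so the mask keeps exactly the low bits. A standalone illustration (NUM_SRQS is an assumed value):

    #include <stdio.h>

    #define NUM_SRQS 256u  /* must be a power of two for the mask to work */

    int main(void)
    {
        unsigned int srqn = 0x12345;

        /* same reduction the driver applies before xa_load() */
        unsigned int idx = srqn & (NUM_SRQS - 1);

        printf("srqn %#x -> table index %u\n", srqn, idx);
        return 0;
    }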
D | hns_roce_hem.c
    52  bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)  in hns_roce_check_whether_mhop() argument
    58          hop_num = hr_dev->caps.qpc_hop_num;  in hns_roce_check_whether_mhop()
    61          hop_num = hr_dev->caps.mpt_hop_num;  in hns_roce_check_whether_mhop()
    64          hop_num = hr_dev->caps.cqc_hop_num;  in hns_roce_check_whether_mhop()
    67          hop_num = hr_dev->caps.srqc_hop_num;  in hns_roce_check_whether_mhop()
    70          hop_num = hr_dev->caps.sccc_hop_num;  in hns_roce_check_whether_mhop()
    73          hop_num = hr_dev->caps.qpc_timer_hop_num;  in hns_roce_check_whether_mhop()
    76          hop_num = hr_dev->caps.cqc_timer_hop_num;  in hns_roce_check_whether_mhop()
   123  static int get_hem_table_config(struct hns_roce_dev *hr_dev,  in get_hem_table_config() argument
   127      struct device *dev = hr_dev->dev;  in get_hem_table_config()
    [all …]
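hns_roce_check_whether_mhop() maps each HEM table type to its configured hop count and reports multi-hop addressing when that count is nonzero. A trimmed sketch of the dispatch (only four of the table types shown; the trailing nonzero check is an assumption drawn from the hop_num assignments above):

    #include <stdbool.h>
    #include <stdio.h>

    enum hem_type { HEM_TYPE_QPC, HEM_TYPE_MTPT, HEM_TYPE_CQC, HEM_TYPE_SRQC };

    struct caps {
        unsigned int qpc_hop_num;
        unsigned int mpt_hop_num;
        unsigned int cqc_hop_num;
        unsigned int srqc_hop_num;
    };

    static bool check_whether_mhop(const struct caps *caps, enum hem_type type)
    {
        unsigned int hop_num = 0;

        switch (type) {
        case HEM_TYPE_QPC:  hop_num = caps->qpc_hop_num;  break;
        case HEM_TYPE_MTPT: hop_num = caps->mpt_hop_num;  break;
        case HEM_TYPE_CQC:  hop_num = caps->cqc_hop_num;  break;
        case HEM_TYPE_SRQC: hop_num = caps->srqc_hop_num; break;
        }

        return hop_num != 0; /* assumption: zero hops means a linear table */
    }

    int main(void)
    {
        struct caps caps = { .qpc_hop_num = 2 };

        printf("QPC multi-hop: %d\n", check_whether_mhop(&caps, HEM_TYPE_QPC));
        return 0;
    }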
D | hns_roce_hw_v2.c
   247      struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);  in check_inl_data_len() local
   251      ibdev_err(&hr_dev->ib_dev,  in check_inl_data_len()
   264      struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);  in set_rc_inl() local
   266      struct ib_device *ibdev = &hr_dev->ib_dev;  in set_rc_inl()
   358  static int check_send_valid(struct hns_roce_dev *hr_dev,  in check_send_valid() argument
   361      struct ib_device *ibdev = &hr_dev->ib_dev;  in check_send_valid()
   376      } else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {  in check_send_valid()
   378          hr_dev->state);  in check_send_valid()
   435      struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);  in set_ud_wqe() local
   508      if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08) {  in set_ud_wqe()
    [all …]
D | hns_roce_hw_v1.c
    65      struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);  in hns_roce_v1_post_send() local
    71      struct device *dev = &hr_dev->pdev->dev;  in hns_roce_v1_post_send()
   141      smac = (u8 *)hr_dev->dev_addr[qp->port];  in hns_roce_v1_post_send()
   187          hns_get_gid_index(hr_dev, qp->phy_port,  in hns_roce_v1_post_send()
   287          hr_dev->caps.max_sq_inline) {  in hns_roce_v1_post_send()
   292          hr_dev->caps.max_sq_inline);  in hns_roce_v1_post_send()
   349      struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);  in hns_roce_v1_post_recv() local
   350      struct device *dev = &hr_dev->pdev->dev;  in hns_roce_v1_post_recv()
   439  static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,  in hns_roce_set_db_event_mode() argument
   445      val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);  in hns_roce_set_db_event_mode()
    [all …]
D | hns_roce_device.h
   620      struct hns_roce_dev *hr_dev;  member
   727      struct hns_roce_dev *hr_dev;  member
   881      int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn,
   892      int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
   893      int (*cmq_init)(struct hns_roce_dev *hr_dev);
   894      void (*cmq_exit)(struct hns_roce_dev *hr_dev);
   895      int (*hw_profile)(struct hns_roce_dev *hr_dev);
   896      int (*hw_init)(struct hns_roce_dev *hr_dev);
   897      void (*hw_exit)(struct hns_roce_dev *hr_dev);
   898      int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
    [all …]
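The lines from 892 on are the hw-abstraction vtable: one core driver drives both HIP06 (v1) and HIP08 (v2) silicon through a struct of function pointers, as seen in the hr_dev->hw->... calls throughout the hits above. A stripped-down sketch of that pattern with a single stub backend (all names illustrative):

    #include <stdio.h>

    struct dev;

    struct hw_ops {
        int (*hw_init)(struct dev *d);
        void (*hw_exit)(struct dev *d);
    };

    struct dev {
        const struct hw_ops *hw;   /* mirrors hr_dev->hw */
    };

    static int v2_init(struct dev *d)  { printf("v2 init\n"); return 0; }
    static void v2_exit(struct dev *d) { printf("v2 exit\n"); }

    static const struct hw_ops v2_ops = { v2_init, v2_exit };

    int main(void)
    {
        struct dev d = { .hw = &v2_ops };

        if (!d.hw->hw_init(&d))    /* core code never knows which silicon */
            d.hw->hw_exit(&d);
        return 0;
    }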
D | hns_roce_hem.h
   110  void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem);
   111  int hns_roce_table_get(struct hns_roce_dev *hr_dev,
   113  void hns_roce_table_put(struct hns_roce_dev *hr_dev,
   115  void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
   118  int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
   122  void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
   124  void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev);
   125  int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
   128  bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type);
   133  int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
    [all …]
D | hns_roce_alloc.c
   160  void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf)  in hns_roce_buf_free() argument
   162      struct device *dev = hr_dev->dev;  in hns_roce_buf_free()
   184  int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,  in hns_roce_buf_alloc() argument
   188      struct device *dev = hr_dev->dev;  in hns_roce_buf_alloc()
   234  int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,  in hns_roce_get_kmem_bufs() argument
   242      dev_err(hr_dev->dev,  in hns_roce_get_kmem_bufs()
   255  int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,  in hns_roce_get_umem_bufs() argument
   265      dev_err(hr_dev->dev, "failed to check umem page shift %u!\n",  in hns_roce_get_umem_bufs()
   285  void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)  in hns_roce_cleanup_bitmap() argument
   287      if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)  in hns_roce_cleanup_bitmap()
    [all …]
D | hns_roce_hw_v2_dfx.c
     9  int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,  in hns_roce_v2_query_cqc_info() argument
    16      mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);  in hns_roce_v2_query_cqc_info()
    21      ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0,  in hns_roce_v2_query_cqc_info()
    25          dev_err(hr_dev->dev, "QUERY cqc cmd process error\n");  in hns_roce_v2_query_cqc_info()
    32      hns_roce_free_cmd_mailbox(hr_dev, mailbox);  in hns_roce_v2_query_cqc_info()
D | hns_roce_db.c
   126  int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,  in hns_roce_alloc_db() argument
   132      mutex_lock(&hr_dev->pgdir_mutex);  in hns_roce_alloc_db()
   134      list_for_each_entry(pgdir, &hr_dev->pgdir_list, list)  in hns_roce_alloc_db()
   138      pgdir = hns_roce_alloc_db_pgdir(hr_dev->dev);  in hns_roce_alloc_db()
   144      list_add(&pgdir->list, &hr_dev->pgdir_list);  in hns_roce_alloc_db()
   150      mutex_unlock(&hr_dev->pgdir_mutex);  in hns_roce_alloc_db()
   155  void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db)  in hns_roce_free_db() argument
   160      mutex_lock(&hr_dev->pgdir_mutex);  in hns_roce_free_db()
   175      dma_free_coherent(hr_dev->dev, PAGE_SIZE, db->u.pgdir->page,  in hns_roce_free_db()
   181      mutex_unlock(&hr_dev->pgdir_mutex);  in hns_roce_free_db()
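hns_roce_alloc_db() sub-allocates doorbell records from shared pages ("pgdirs"): it scans pgdir_list for free space under pgdir_mutex (lines 132-134) and only allocates a fresh pgdir when every existing one is full (line 138). A much-reduced sketch of that reuse logic (list and locking collapsed to a fixed array; slot counts are assumed demo values):

    #include <errno.h>
    #include <stdio.h>

    #define SLOTS_PER_PGDIR 4   /* assumed records per shared page */
    #define MAX_PGDIRS      8

    struct pgdir {
        int in_use;                    /* the page exists */
        int used[SLOTS_PER_PGDIR];     /* per-record occupancy */
    };

    static struct pgdir pgdirs[MAX_PGDIRS];

    static int alloc_db(int *pg, int *slot)
    {
        /* mutex_lock(pgdir_mutex) guards this walk in the driver */
        for (int p = 0; p < MAX_PGDIRS; p++) {
            if (!pgdirs[p].in_use)
                pgdirs[p].in_use = 1;  /* "allocate" a new page lazily */
            for (int s = 0; s < SLOTS_PER_PGDIR; s++) {
                if (!pgdirs[p].used[s]) {
                    pgdirs[p].used[s] = 1;
                    *pg = p;
                    *slot = s;
                    return 0;
                }
            }
        }
        return -ENOMEM;
    }

    int main(void)
    {
        int pg, slot;

        /* the fifth allocation spills into a second pgdir */
        for (int i = 0; i < 5; i++)
            if (!alloc_db(&pg, &slot))
                printf("db %d -> pgdir %d slot %d\n", i, pg, slot);
        return 0;
    }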
D | hns_roce_restrack.c
    82      struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);  in hns_roce_fill_res_cq_entry() local
    88      if (!hr_dev->dfx->query_cqc_info)  in hns_roce_fill_res_cq_entry()
    95      ret = hr_dev->dfx->query_cqc_info(hr_dev, hr_cq->cqn, (int *)context);  in hns_roce_fill_res_cq_entry()
D | hns_roce_cmd.h
   142  int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
   147  *hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev);
   148  void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
D | hns_roce_ah.c
    60      struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device);  in hns_roce_create_ah() local
    80      if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08) {  in hns_roce_create_ah()
D | hns_roce_hw_v2.h
  1986  int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,
  1989  static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2],  in hns_roce_write64() argument
  1992      struct hns_roce_v2_priv *priv = hr_dev->priv;  in hns_roce_write64()
  1996      if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle))  in hns_roce_write64()
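hns_roce_write64() issues the doorbell as a single 64-bit store built from two little-endian 32-bit words, and suppresses it while the device is resetting (the dis_db / get_hw_reset_stat() guard at line 1996). A userspace sketch of just the packing (the kernel works with __le32 values and an MMIO 64-bit write; this demo only shows how the two words combine):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t pack_doorbell(const uint32_t val[2])
    {
        /* low word first, matching a little-endian 64-bit register */
        return (uint64_t)val[0] | ((uint64_t)val[1] << 32);
    }

    int main(void)
    {
        uint32_t db[2] = { 0xdeadbeef, 0x00c0ffee };

        printf("doorbell = %#018llx\n",
               (unsigned long long)pack_doorbell(db));
        return 0;
    }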