Searched refs:hr_cq (Results 1 – 7 of 7) sorted by relevance
/kernel/linux/linux-5.10/drivers/infiniband/hw/hns/
hns_roce_cq.c
    42  static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)  in alloc_cqc() argument
    51      ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts),  in alloc_cqc()
    59      ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);  in alloc_cqc()
    66      ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);  in alloc_cqc()
    69              hr_cq->cqn, ret);  in alloc_cqc()
    73      ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));  in alloc_cqc()
    86      hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);  in alloc_cqc()
    89      ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 0,  in alloc_cqc()
    95              hr_cq->cqn, ret);  in alloc_cqc()
    99      hr_cq->cons_index = 0;  in alloc_cqc()
    [all …]
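
alloc_cqc() reserves a CQ number from a bitmap, backs it with a CQC table entry, publishes the CQ in an xarray, and finally asks the hardware to install the context through a mailbox command, unwinding each step in reverse order when a later one fails. Below is a minimal userspace sketch of that goto-unwind pattern; the step functions are stand-ins, not the driver's real helpers.

/*
 * Each allocation step is rolled back in reverse order if a later
 * step fails; the third step is forced to fail to show the unwind.
 */
#include <stdio.h>

static int step_ok(const char *name)   { printf("%s: ok\n", name); return 0; }
static int step_fail(const char *name) { printf("%s: failed\n", name); return -1; }
static void undo(const char *name)     { printf("undo %s\n", name); }

static int alloc_cqc_sketch(void)
{
	int ret;

	ret = step_ok("alloc cqn from bitmap");   /* ~ hns_roce_bitmap_alloc() */
	if (ret)
		goto err_out;

	ret = step_ok("get CQC table entry");     /* ~ hns_roce_table_get() */
	if (ret)
		goto err_free_cqn;

	ret = step_fail("store cq in xarray");    /* ~ xa_store(), forced failure */
	if (ret)
		goto err_put_table;

	return 0;

err_put_table:
	undo("CQC table entry");
err_free_cqn:
	undo("cqn bitmap bit");
err_out:
	return ret;
}

int main(void)
{
	return alloc_cqc_sketch() ? 1 : 0;
}
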
hns_roce_hw_v1.c
    1897  static void *get_cqe(struct hns_roce_cq *hr_cq, int n)  in get_cqe() argument
    1899      return hns_roce_buf_offset(hr_cq->mtr.kmem, n * HNS_ROCE_V1_CQE_SIZE);  in get_cqe()
    1902  static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)  in get_sw_cqe() argument
    1904      struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe);  in get_sw_cqe()
    1908          !!(n & hr_cq->cq_depth)) ? hr_cqe : NULL;  in get_sw_cqe()
    1911  static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)  in next_cqe_sw() argument
    1913      return get_sw_cqe(hr_cq, hr_cq->cons_index);  in next_cqe_sw()
    1916  static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)  in hns_roce_v1_cq_set_ci() argument
    1920      doorbell[0] = cpu_to_le32(cons_index & ((hr_cq->cq_depth << 1) - 1));  in hns_roce_v1_cq_set_ci()
    1928          ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);  in hns_roce_v1_cq_set_ci()
    [all …]
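
get_sw_cqe() decides whether a CQE is software-owned by XOR-ing the CQE's owner bit against the parity bit n & cq_depth, which flips on every lap of the power-of-two ring; hns_roce_v1_cq_set_ci() correspondingly reports the consumer index modulo twice the depth so the parity information survives the doorbell. A self-contained sketch of the ownership test, with illustrative names and sizes:

#include <stdio.h>
#include <stdbool.h>

#define CQ_DEPTH 8u                 /* power of two, like cq_depth */

struct cqe {
	unsigned int owner;         /* bit toggled by hardware per lap */
};

static struct cqe ring[CQ_DEPTH];

static struct cqe *get_sw_cqe_sketch(unsigned int ci)
{
	struct cqe *cqe = &ring[ci & (CQ_DEPTH - 1)];  /* wrap the index */
	bool lap_parity = !!(ci & CQ_DEPTH);           /* flips each lap */

	/* the CQE belongs to software when owner bit and parity disagree */
	return (cqe->owner ^ lap_parity) ? cqe : NULL;
}

int main(void)
{
	/* hardware "publishes" slot 0 on the first lap: owner = 1 */
	ring[0].owner = 1;

	printf("ci=0 -> %s\n", get_sw_cqe_sketch(0) ? "cqe" : "empty");
	printf("ci=8 -> %s\n", get_sw_cqe_sketch(8) ? "cqe" : "empty");
	return 0;
}
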
hns_roce_restrack.c
    83      struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);  in hns_roce_fill_res_cq_entry() local
    95      ret = hr_dev->dfx->query_cqc_info(hr_dev, hr_cq->cqn, (int *)context);  in hns_roce_fill_res_cq_entry()
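
hns_roce_fill_res_cq_entry() first converts the core's ib_cq back into the driver's hns_roce_cq with to_hr_cq(), the usual container_of() embedding trick. A trimmed userspace sketch of that conversion, with stand-in struct bodies:

#include <stddef.h>
#include <stdio.h>

struct ib_cq { int cqe; };                   /* generic core object */

struct hns_roce_cq {
	struct ib_cq ib_cq;                  /* embedded core object */
	unsigned long cqn;
};

/* userspace stand-in for the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct hns_roce_cq *to_hr_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct hns_roce_cq, ib_cq);
}

int main(void)
{
	struct hns_roce_cq cq = { .ib_cq = { .cqe = 255 }, .cqn = 7 };

	/* the core layer only ever hands the driver &cq.ib_cq back */
	printf("cqn=%lu\n", to_hr_cq(&cq.ib_cq)->cqn);
	return 0;
}
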
hns_roce_hw_v2.c
    2969  static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)  in get_cqe_v2() argument
    2971      return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);  in get_cqe_v2()
    2974  static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)  in get_sw_cqe_v2() argument
    2976      struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);  in get_sw_cqe_v2()
    2980          !!(n & hr_cq->cq_depth)) ? cqe : NULL;  in get_sw_cqe_v2()
    2983  static inline void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 ci)  in hns_roce_v2_cq_set_ci() argument
    2985      *hr_cq->set_ci_db = ci & V2_CQ_DB_PARAMETER_CONS_IDX_M;  in hns_roce_v2_cq_set_ci()
    2988  static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,  in __hns_roce_v2_cq_clean() argument
    2997      for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);  in __hns_roce_v2_cq_clean()
    2999          if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe)  in __hns_roce_v2_cq_clean()
    [all …]
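
__hns_roce_v2_cq_clean() walks the unpolled window starting at cons_index (bounded by cons_index + ib_cq.cqe so the scan cannot lap itself), discards every CQE belonging to the departing QP, and slides the survivors over the freed slots before bumping the consumer index. A simplified model of that backward compaction, using plain ints in place of CQEs:

#include <stdio.h>

#define DEPTH 8u
#define MASK  (DEPTH - 1)

static int ring[DEPTH];                 /* qpn recorded per CQE slot */

/* drop CQEs of 'qpn' in [ci, pi), sliding survivors toward pi */
static unsigned int cq_clean_sketch(unsigned int ci, unsigned int pi, int qpn)
{
	unsigned int i, freed = 0;

	/* walk backwards from the newest unpolled CQE to the oldest */
	for (i = pi; i-- > ci; ) {
		if (ring[i & MASK] == qpn)
			++freed;
		else if (freed)
			ring[(i + freed) & MASK] = ring[i & MASK];
	}
	return ci + freed;              /* consumer index skips freed slots */
}

int main(void)
{
	unsigned int ci = 0, pi = 5;

	ring[0] = 1; ring[1] = 2; ring[2] = 1; ring[3] = 3; ring[4] = 1;

	ci = cq_clean_sketch(ci, pi, 1);        /* QP 1 is being torn down */

	printf("new ci=%u, remaining:", ci);
	for (unsigned int i = ci; i < pi; i++)
		printf(" qpn=%d", ring[i & MASK]);
	printf("\n");                   /* new ci=3, remaining: qpn=2 qpn=3 */
	return 0;
}
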
hns_roce_main.c
    808     struct hns_roce_cq *hr_cq = to_hr_cq(cq);  in check_and_get_armed_cq() local
    811     spin_lock_irqsave(&hr_cq->lock, flags);  in check_and_get_armed_cq()
    813     if (!hr_cq->is_armed) {  in check_and_get_armed_cq()
    814         hr_cq->is_armed = 1;  in check_and_get_armed_cq()
    815         list_add_tail(&hr_cq->node, cq_list);  in check_and_get_armed_cq()
    818     spin_unlock_irqrestore(&hr_cq->lock, flags);  in check_and_get_armed_cq()
    824     struct hns_roce_cq *hr_cq;  in hns_roce_handle_device_err() local
    844     list_for_each_entry(hr_cq, &cq_list, node)  in hns_roce_handle_device_err()
    845         hns_roce_cq_completion(hr_dev, hr_cq->cqn);  in hns_roce_handle_device_err()
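
On a device error, check_and_get_armed_cq() marks each CQ armed exactly once under its spinlock and collects it on a local list; hns_roce_handle_device_err() then fires a completion event for every collected CQ. A minimal sketch of the arm-once-and-collect pattern, with a pthread mutex standing in for the spinlock and a hand-rolled prepend list in place of list_add_tail():

#include <pthread.h>
#include <stdio.h>

struct cq {
	pthread_mutex_t lock;
	int is_armed;
	unsigned long cqn;
	struct cq *next;                /* intrusive list link */
};

static void check_and_get_armed(struct cq *cq, struct cq **list)
{
	pthread_mutex_lock(&cq->lock);
	if (!cq->is_armed) {            /* arm each CQ at most once */
		cq->is_armed = 1;
		cq->next = *list;
		*list = cq;
	}
	pthread_mutex_unlock(&cq->lock);
}

int main(void)
{
	struct cq a = { PTHREAD_MUTEX_INITIALIZER, 0, 1, NULL };
	struct cq b = { PTHREAD_MUTEX_INITIALIZER, 0, 2, NULL };
	struct cq *list = NULL;

	check_and_get_armed(&a, &list);
	check_and_get_armed(&b, &list);
	check_and_get_armed(&a, &list); /* already armed: not re-queued */

	for (struct cq *c = list; c; c = c->next)
		printf("completion for cqn %lu\n", c->cqn);
	return 0;
}
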
hns_roce_qp.c
    1278    struct hns_roce_cq *hr_cq;  in hns_roce_wq_overflow() local
    1285    hr_cq = to_hr_cq(ib_cq);  in hns_roce_wq_overflow()
    1286    spin_lock(&hr_cq->lock);  in hns_roce_wq_overflow()
    1288    spin_unlock(&hr_cq->lock);  in hns_roce_wq_overflow()
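
hns_roce_wq_overflow() compares head - tail against the queue size, and only takes the CQ lock to recheck when the fast path says the queue looks full. Because head and tail are free-running unsigned counters, the subtraction stays correct across wraparound. A sketch of the fast-path test (the locked recheck is elided, and the field names mirror the driver's but the struct is a stand-in):

#include <stdbool.h>
#include <stdio.h>

struct wq {
	unsigned int head;     /* incremented by the poster */
	unsigned int tail;     /* incremented as completions are polled */
	unsigned int wqe_cnt;  /* ring capacity */
};

static bool wq_overflow_sketch(const struct wq *wq, unsigned int nreq)
{
	unsigned int cur = wq->head - wq->tail;   /* safe across wrap */

	return cur + nreq >= wq->wqe_cnt;
}

int main(void)
{
	/* counters just below the 32-bit wrap point, 2 WQEs in flight */
	struct wq wq = { .head = 4294967290u, .tail = 4294967288u, .wqe_cnt = 8 };

	printf("post 5: %s\n", wq_overflow_sketch(&wq, 5) ? "overflow" : "ok");
	printf("post 6: %s\n", wq_overflow_sketch(&wq, 6) ? "overflow" : "ok");
	return 0;
}
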
hns_roce_device.h
    918         struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
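
This last hit is the write_cqc() prototype in the hw ops table, which is how alloc_cqc() above (hr_dev->hw->write_cqc(...)) stays agnostic of the CQ-context layout differences between v1 and v2 hardware. A sketch of that function-pointer indirection, with trimmed stand-in types:

#include <stdio.h>

struct hr_cq { unsigned long cqn; };

struct hr_hw_ops {
	void (*write_cqc)(struct hr_cq *cq, void *mb_buf);
};

struct hr_dev {
	const struct hr_hw_ops *hw;     /* chosen at probe time */
};

static void v2_write_cqc(struct hr_cq *cq, void *mb_buf)
{
	/* a real implementation would encode the CQ context here */
	printf("v2: encoding cqc for cqn %lu into %p\n", cq->cqn, mb_buf);
}

static const struct hr_hw_ops v2_ops = { .write_cqc = v2_write_cqc };

int main(void)
{
	struct hr_dev dev = { .hw = &v2_ops };
	struct hr_cq cq = { .cqn = 3 };
	char mailbox[64];

	dev.hw->write_cqc(&cq, mailbox);  /* caller stays hw-version agnostic */
	return 0;
}
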