/drivers/net/ethernet/mellanox/mlx4/
srq.c
    43    void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)    in mlx4_srq_event() argument
    50    srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));    in mlx4_srq_event()
    57    mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);    in mlx4_srq_event()
    96    int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)    in __mlx4_srq_alloc_icm() argument
    102   *srqn = mlx4_bitmap_alloc(&srq_table->bitmap);    in __mlx4_srq_alloc_icm()
    103   if (*srqn == -1)    in __mlx4_srq_alloc_icm()
    106   err = mlx4_table_get(dev, &srq_table->table, *srqn, GFP_KERNEL);    in __mlx4_srq_alloc_icm()
    110   err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn, GFP_KERNEL);    in __mlx4_srq_alloc_icm()
    116   mlx4_table_put(dev, &srq_table->table, *srqn);    in __mlx4_srq_alloc_icm()
    119   mlx4_bitmap_free(&srq_table->bitmap, *srqn, MLX4_NO_RR);    in __mlx4_srq_alloc_icm()
    [all …]
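The mlx4_srq_event() hits above show how async SRQ events are dispatched: the SRQ number carried in the event is masked to the table size and used as the lookup key, and an unknown number only triggers a warning. Below is a minimal userspace sketch of that pattern, assuming a plain array as a stand-in for the driver's radix tree and a reduced struct in place of struct mlx4_srq.

    /*
     * Sketch only: the lookup table and types are simplified stand-ins,
     * not the kernel's srq_table or struct mlx4_srq.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define NUM_SRQS 16                     /* assumed power-of-two table size */

    struct srq {
        uint32_t srqn;
        void (*event)(struct srq *srq, int event_type);
    };

    static struct srq *srq_table[NUM_SRQS]; /* stand-in for the radix tree */

    static void srq_event(uint32_t srqn, int event_type)
    {
        struct srq *srq = srq_table[srqn & (NUM_SRQS - 1)];

        if (!srq) {
            fprintf(stderr, "Async event for bogus SRQ %08x\n", srqn);
            return;
        }
        srq->event(srq, event_type);        /* hand the event to the owner */
    }

    int main(void)
    {
        srq_event(0x12345, 0);              /* nothing registered: warning path */
        return 0;
    }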
resource_tracker.c
    1720  int srqn;    in srq_alloc_res() local
    1729  err = __mlx4_srq_alloc_icm(dev, &srqn);    in srq_alloc_res()
    1735  err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);    in srq_alloc_res()
    1738  __mlx4_srq_free_icm(dev, srqn);    in srq_alloc_res()
    1742  set_param_l(out_param, srqn);    in srq_alloc_res()
    2237  int srqn;    in srq_free_res() local
    2242  srqn = get_param_l(&in_param);    in srq_free_res()
    2243  err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);    in srq_free_res()
    2248  __mlx4_srq_free_icm(dev, srqn);    in srq_free_res()
    2468  int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;    in qp_get_mtt_size()
    [all …]
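srq_alloc_res() and srq_free_res() above follow an acquire-then-track order: the ICM resource is allocated first (__mlx4_srq_alloc_icm), ownership is then recorded for the requesting slave (add_res_range), and the ICM is freed again if the tracking step fails; freeing mirrors this in reverse. A self-contained sketch of that ordering and rollback, using a trivial owner array as a stand-in for the driver's allocator and resource tracker:

    /* Stand-ins only: a flat owner array replaces the ICM allocator and
     * the SR-IOV resource tracker; only the control flow is the point. */
    #include <stdio.h>

    #define NUM_SRQS 8

    static int owner[NUM_SRQS];              /* -1 = free, -2 = allocated but untracked */

    static int alloc_icm(int *srqn)          /* stand-in for __mlx4_srq_alloc_icm() */
    {
        for (int i = 0; i < NUM_SRQS; i++) {
            if (owner[i] == -1) {
                owner[i] = -2;
                *srqn = i;
                return 0;
            }
        }
        return -1;
    }

    static void free_icm(int srqn)           /* stand-in for __mlx4_srq_free_icm() */
    {
        owner[srqn] = -1;
    }

    static int track(int slave, int srqn)    /* stand-in for add_res_range() */
    {
        if (slave < 0)
            return -1;                       /* simulated tracker failure */
        owner[srqn] = slave;
        return 0;
    }

    static int srq_alloc(int slave, int *out_srqn)
    {
        int srqn;
        int err = alloc_icm(&srqn);          /* grab the resource first */

        if (err)
            return err;
        err = track(slave, srqn);            /* then record which slave owns it */
        if (err) {
            free_icm(srqn);                  /* roll back so nothing leaks */
            return err;
        }
        *out_srqn = srqn;
        return 0;
    }

    int main(void)
    {
        int srqn;

        for (int i = 0; i < NUM_SRQS; i++)
            owner[i] = -1;
        if (srq_alloc(3, &srqn) == 0)
            printf("slave 3 owns srqn %d\n", srqn);
        if (srq_alloc(-1, &srqn) != 0)
            printf("failed allocation rolled back, owner[1] = %d\n", owner[1]);
        return 0;
    }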
en_resources.c
    75    context->srqn = cpu_to_be32(7 << 28); /* this fills bits 30:28 */    in mlx4_en_fill_qp_context()
mlx4.h
    911   int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
    912   void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
    1140  void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
eq.c
    520   be32_to_cpu(eqe->event.srq.srqn)    in mlx4_eq_int()
    531   be32_to_cpu(eqe->event.srq.srqn),    in mlx4_eq_int()
    542   mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &    in mlx4_eq_int()
/drivers/net/ethernet/mellanox/mlx5/core/
srq.c
    41    void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)    in mlx5_srq_event() argument
    48    srq = radix_tree_lookup(&table->tree, srqn);    in mlx5_srq_event()
    55    mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);    in mlx5_srq_event()
    65    struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)    in mlx5_core_get_srq() argument
    72    srq = radix_tree_lookup(&table->tree, srqn);    in mlx5_core_get_srq()
    100   srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;    in mlx5_core_create_srq()
    106   err = radix_tree_insert(&table->tree, srq->srqn, srq);    in mlx5_core_create_srq()
    109   mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);    in mlx5_core_create_srq()
    118   din.srqn = cpu_to_be32(srq->srqn);    in mlx5_core_create_srq()
    134   tmp = radix_tree_delete(&table->tree, srq->srqn);    in mlx5_core_destroy_srq()
    [all …]
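The mlx5_core_create_srq() hits show that the firmware returns the SRQ number in a big-endian field and that the driver keeps only the low 24 bits before using it as the radix-tree key (and converts back with cpu_to_be32 when issuing the destroy command). A small sketch of just that step, with ntohl()/htonl() standing in for be32_to_cpu()/cpu_to_be32() and a made-up output struct:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    struct create_srq_out {                  /* hypothetical, not the real command layout */
        uint32_t srqn;                       /* big-endian on the wire */
    };

    int main(void)
    {
        struct create_srq_out out = { .srqn = htonl(0x01000123) };
        uint32_t srqn = ntohl(out.srqn) & 0xffffff;  /* SRQ numbers are 24 bits wide */

        printf("srqn = 0x%06x\n", srqn);             /* prints srqn = 0x000123 */
        return 0;
    }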
/drivers/infiniband/hw/mthca/
mthca_srq.c
    128   context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);    in mthca_arbel_init_srq_context()
    229   srq->srqn = mthca_alloc(&dev->srq_table.alloc);    in mthca_alloc_srq()
    230   if (srq->srqn == -1)    in mthca_alloc_srq()
    234   err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);    in mthca_alloc_srq()
    240   srq->srqn, &srq->db);    in mthca_alloc_srq()
    268   err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn);    in mthca_alloc_srq()
    277   srq->srqn & (dev->limits.num_srqs - 1),    in mthca_alloc_srq()
    295   err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);    in mthca_alloc_srq()
    311   mthca_table_put(dev, dev->srq_table.table, srq->srqn);    in mthca_alloc_srq()
    314   mthca_free(&dev->srq_table.alloc, srq->srqn);    in mthca_alloc_srq()
    [all …]
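mthca_alloc_srq() above acquires its resources in sequence (SRQ number, ICM table entry, doorbell, then the SW2HW_SRQ firmware command), and the later hits release them in reverse order when a step fails. That is the usual goto-unwind idiom; a generic sketch with hypothetical step/undo functions, not the driver's actual calls:

    #include <stdio.h>

    static int step1(void)  { return 0; }    /* e.g. allocate the SRQ number */
    static int step2(void)  { return 0; }    /* e.g. get the ICM table entry */
    static int step3(void)  { return -1; }   /* e.g. SW2HW command (fails here) */
    static void undo2(void) { puts("undo step2"); }
    static void undo1(void) { puts("undo step1"); }

    static int alloc_srq(void)
    {
        int err;

        err = step1();
        if (err)
            return err;
        err = step2();
        if (err)
            goto err_step1;
        err = step3();
        if (err)
            goto err_step2;
        return 0;

    err_step2:
        undo2();                             /* unwind in reverse order */
    err_step1:
        undo1();
        return err;
    }

    int main(void)
    {
        printf("alloc_srq() = %d\n", alloc_srq());
        return 0;
    }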
mthca_user.h
    99    __u32 srqn;    member
mthca_provider.h
    226   int srqn;    member
mthca_eq.c
    146   __be32 srqn;    member
    302   mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,    in mthca_eq_int()
mthca_dev.h
    519   void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
mthca_qp.c
    151   __be32 srqn;    member
    769   qp_context->srqn = cpu_to_be32(1 << 24 |    in __mthca_modify_qp()
    770   to_msrq(ibqp->srq)->srqn);    in __mthca_modify_qp()
mthca_provider.c
    479   if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {    in mthca_create_srq()
/drivers/infiniband/hw/mlx5/
srq.c
    68    type, srq->srqn);    in mlx5_ib_srq_event()
    315   mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);    in mlx5_ib_create_srq()
    318   srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;    in mlx5_ib_create_srq()
    321   if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {    in mlx5_ib_create_srq()
user.h
    117   __u32 srqn;    member
cq.c
    184   be32_to_cpu(cqe->srqn));    in handle_responder()
    881   if (srq && (ntohl(cqe64->srqn) & 0xffffff))    in __mlx5_ib_cq_clean()
qp.c
    942   in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);    in create_qp_common()
    948   in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);    in create_qp_common()
    953   in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);    in create_qp_common()
    956   in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);    in create_qp_common()
/drivers/infiniband/hw/mlx4/
srq.c
    63    "on SRQ %06x\n", type, srq->srqn);    in mlx4_ib_srq_event()
    191   srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;    in mlx4_ib_create_srq()
    194   if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {    in mlx4_ib_create_srq()
user.h
    94    __u32 srqn;    member
qp.c
    1640  context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);    in __mlx4_ib_modify_qp()
    1688  context->srqn = cpu_to_be32(7 << 28);    in __mlx4_ib_modify_qp()
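Together with the en_resources.c hit earlier (7 << 28 "fills bits 30:28") and the resource_tracker.c hit that tests (qpc->srqn >> 24) & 1, the two __mlx4_ib_modify_qp() lines show how the QP context's srqn word is packed: the SRQ number in the low 24 bits with bit 24 set when a SRQ is attached, and bits 30:28 set to 7 when none is. A small sketch of that packing in host byte order (the drivers convert with cpu_to_be32 before writing the context); the helper name is made up:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t qpc_srqn_word(int has_srq, uint32_t srqn)
    {
        if (has_srq)
            return (1u << 24) | (srqn & 0xffffff);  /* bit 24 flag + 24-bit SRQ number */
        return 7u << 28;                            /* no SRQ: bits 30:28 = 7 */
    }

    int main(void)
    {
        printf("with SRQ 0x123: 0x%08x\n", qpc_srqn_word(1, 0x123));  /* 0x01000123 */
        printf("without SRQ:    0x%08x\n", qpc_srqn_word(0, 0));      /* 0x70000000 */
        return 0;
    }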
/drivers/infiniband/core/
uverbs_cmd.c
    3031  resp.srqn = srq->ext.xrc.srq_num;    in __uverbs_create_xsrq()