/drivers/net/ethernet/ibm/ehea/

ehea_phyp.c: all hits in ehea_h_alloc_resource_qp(), where init_attr is an argument (line 213):
     213  struct ehea_qp_init_attr *init_attr, const u32 pd,
     220  EHEA_BMASK_SET(H_ALL_RES_QP_EQPO, init_attr->low_lat_rq1 ? 1 : 0)
     224  | EHEA_BMASK_SET(H_ALL_RES_QP_LL_QP, init_attr->low_lat_rq1)
     227  | EHEA_BMASK_SET(H_ALL_RES_QP_SIGT, init_attr->signalingtype)
     231  | EHEA_BMASK_SET(H_ALL_RES_QP_TOKEN, init_attr->qp_token);
     235  get_order_of_qentries(init_attr->max_nr_send_wqes))
     237  get_order_of_qentries(init_attr->max_nr_rwqes_rq1))
     239  get_order_of_qentries(init_attr->max_nr_rwqes_rq2))
     241  get_order_of_qentries(init_attr->max_nr_rwqes_rq3))
     242  | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SSGE, init_attr->wqe_size_enc_sq)
     [all …]

ehea_qmr.c: all hits in ehea_create_qp(), where init_attr is an argument (line 418):
     418  u32 pd, struct ehea_qp_init_attr *init_attr)
     433  hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
     440  wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
     441  wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
     442  wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
     443  wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);
     445  ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
     447  init_attr->act_wqe_size_enc_sq, adapter,
     455  init_attr->nr_rq1_pages,
     457  init_attr->act_wqe_size_enc_rq1,
     [all …]

ehea_main.c:
     654  pr->qp->init_attr.qp_nr);                                    in ehea_treat_poll_error()
    1277  struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;    in ehea_fill_port_res() (local)
    1281  ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);   in ehea_fill_port_res()
    1283  ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);  in ehea_fill_port_res()
    1391  port->port_res[i].qp->init_attr.qp_nr;                        in ehea_configure_port()
    1394  port->port_res[0].qp->init_attr.qp_nr;                        in ehea_configure_port()
    1468  struct ehea_qp_init_attr *init_attr = NULL;                   in ehea_init_port_res() (local)
    1513  init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);          in ehea_init_port_res()
    1514  if (!init_attr) {                                             in ehea_init_port_res()
    1520  init_attr->low_lat_rq1 = 1;                                   in ehea_init_port_res()
    [all …]

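The ehea hits above OR together EHEA_BMASK_SET() terms into one 64-bit hcall parameter. A minimal sketch of that shift-and-mask packing idiom follows; the FLD_* macros, QP_TOKEN, and QP_LL_QP names are made-up stand-ins for the real definitions in the ehea headers, shown only to illustrate the pattern.

#include <linux/types.h>

/* Encode a field as (shift << 16) | width; widths must be < 64. */
#define FLD(shift, width)   (((shift) << 16) | (width))
#define FLD_SHIFT(f)        (((f) >> 16) & 0xffff)
#define FLD_MASK(f)         ((1ULL << ((f) & 0xffff)) - 1)
#define FLD_SET(f, v)       ((((u64)(v)) & FLD_MASK(f)) << FLD_SHIFT(f))

#define QP_TOKEN            FLD(32, 32)   /* hypothetical field, bits 63..32 */
#define QP_LL_QP            FLD(0, 1)     /* hypothetical field, bit 0 */

static u64 pack_qp_params(u32 token, bool low_lat)
{
	/* OR the encoded fields together, as ehea_h_alloc_resource_qp() does. */
	return FLD_SET(QP_TOKEN, token) | FLD_SET(QP_LL_QP, low_lat);
}
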
/drivers/infiniband/sw/rdmavt/

qp.c: all hits in rvt_create_qp(), where init_attr is an argument (line 621):
     621  struct ib_qp_init_attr *init_attr,
     638  if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
     639  init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
     640  init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
     645  if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
     646  init_attr->qp_type != IB_QPT_RC)
     649  gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
     653  if (!init_attr->srq) {
     654  if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
     655  init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
     [all …]

qp.h: function prototypes:
      56  struct ib_qp_init_attr *init_attr,
      62  int attr_mask, struct ib_qp_init_attr *init_attr);

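rvt_create_qp() bounds-checks the requested capabilities against the device's reported limits before allocating anything, and skips the receive-side check when the QP uses an SRQ. A generic sketch of that check, assuming the 4.x-era ib_device_attr field names max_sge and max_qp_wr seen above; check_qp_caps() itself is a hypothetical helper:

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

/* Hypothetical helper mirroring the bounds check in rvt_create_qp(). */
static int check_qp_caps(const struct ib_device_attr *props,
			 const struct ib_qp_init_attr *init_attr)
{
	if (init_attr->cap.max_send_sge > props->max_sge ||
	    init_attr->cap.max_send_wr > props->max_qp_wr)
		return -EINVAL;

	/* Receive caps only matter when the QP has its own RQ (no SRQ). */
	if (!init_attr->srq &&
	    (init_attr->cap.max_recv_sge > props->max_sge ||
	     init_attr->cap.max_recv_wr > props->max_qp_wr))
		return -EINVAL;

	return 0;
}
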
/drivers/infiniband/hw/cxgb3/

iwch_qp.c: all hits in rdma_init(), where init_attr is a local (line 818):
     818  struct t3_rdma_init_attr init_attr;
     821  init_attr.tid = qhp->ep->hwtid;
     822  init_attr.qpid = qhp->wq.qpid;
     823  init_attr.pdid = qhp->attr.pd;
     824  init_attr.scqid = qhp->attr.scq;
     825  init_attr.rcqid = qhp->attr.rcq;
     826  init_attr.rq_addr = qhp->wq.rq_addr;
     827  init_attr.rq_size = 1 << qhp->wq.rq_size_log2;
     828  init_attr.mpaattrs = uP_RI_MPA_IETF_ENABLE |
     833  init_attr.qpcaps = uP_RI_QP_RDMA_READ_ENABLE |
     [all …]

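Line 827 recovers a queue depth from its log2-encoded form: the hardware stores only the exponent, so depths are always powers of two. A small sketch of the round-trip using the standard helpers from <linux/log2.h>; encode_depth() is a hypothetical name:

#include <linux/log2.h>
#include <linux/types.h>

/* Hypothetical helper: round the requested entry count (must be >= 1)
 * up to a power of two, then store only the exponent. */
static u8 encode_depth(u32 entries)
{
	return ilog2(roundup_pow_of_two(entries));
}

/* Decoding is the shift seen at line 827: entries = 1 << rq_size_log2. */
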
/drivers/infiniband/ulp/ipoib/

ipoib_verbs.c: all hits in ipoib_transport_dev_init(), where init_attr is a local (line 134):
     134  struct ib_qp_init_attr init_attr = {
     197  init_attr.send_cq = priv->send_cq;
     198  init_attr.recv_cq = priv->recv_cq;
     201  init_attr.create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;
     204  init_attr.create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
     207  init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;
     209  priv->qp = ib_create_qp(priv->pd, &init_attr);
     234  if (init_attr.cap.max_send_sge > 1)
     237  priv->max_send_sge = init_attr.cap.max_send_sge;

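ipoib fills a stack-allocated ib_qp_init_attr and hands it to ib_create_qp(); note lines 234 and 237, where it reads back the capabilities the provider actually granted. A minimal, self-contained sketch of the same pattern; create_ud_qp() and the literal depths are illustrative, not ipoib's values:

#include <rdma/ib_verbs.h>

/* Hypothetical helper showing the fill-and-create pattern above. */
static struct ib_qp *create_ud_qp(struct ib_pd *pd,
				  struct ib_cq *send_cq, struct ib_cq *recv_cq)
{
	struct ib_qp_init_attr init_attr = {
		.send_cq = send_cq,
		.recv_cq = recv_cq,
		.cap = {
			.max_send_wr  = 64,	/* illustrative depths */
			.max_recv_wr  = 64,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type     = IB_QPT_UD,
	};

	/* ib_create_qp() returns an ERR_PTR() on failure; on success the
	 * provider may have rounded init_attr.cap up to what it allocated. */
	return ib_create_qp(pd, &init_attr);
}
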
/drivers/infiniband/hw/mlx4/

srq.c: all hits in mlx4_ib_create_srq(), where init_attr is an argument (line 73):
      73  struct ib_srq_init_attr *init_attr,
      88  if (init_attr->attr.max_wr >= dev->dev->caps.max_srq_wqes ||
      89  init_attr->attr.max_sge > dev->dev->caps.max_srq_sge)
      98  srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
      99  srq->msrq.max_gs = init_attr->attr.max_sge;
     186  cqn = (init_attr->srq_type == IB_SRQT_XRC) ?
     187  to_mcq(init_attr->ext.xrc.cq)->mcq.cqn : 0;
     188  xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
     189  to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn :
     205  init_attr->attr.max_wr = srq->msrq.max - 1;

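Lines 98 and 205 show the usual SRQ sizing handshake: reserve one extra WQE, round up to a power of two, then write the usable depth back so the consumer sees what it really got. A hedged consumer-side sketch; create_basic_srq() and the requested depth are illustrative:

#include <linux/err.h>
#include <linux/printk.h>
#include <rdma/ib_verbs.h>

/* Hypothetical helper: create a basic (non-XRC) SRQ. */
static struct ib_srq *create_basic_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr srq_attr = {
		.srq_type = IB_SRQT_BASIC,
		.attr = {
			.max_wr  = 128,	/* illustrative request */
			.max_sge = 1,
		},
	};
	struct ib_srq *srq;

	srq = ib_create_srq(pd, &srq_attr);
	if (IS_ERR(srq))
		return srq;

	/* The provider wrote the granted limit back, e.g. mlx4 sets
	 * max_wr = roundup_pow_of_two(max_wr + 1) - 1 above. */
	pr_debug("srq depth granted: %u\n", srq_attr.attr.max_wr);
	return srq;
}
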
qp.c: all hits in create_qp_common(), where init_attr is an argument (line 640):
     640  struct ib_qp_init_attr *init_attr,
     649  enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
     657  !(init_attr->create_flags & MLX4_IB_SRIOV_SQP))) {
     658  if (init_attr->qp_type == IB_QPT_GSI)
     670  init_attr->cap.max_recv_sge++;
     671  } else if (init_attr->create_flags & MLX4_IB_SRIOV_TUNNEL_QP) {
     673  container_of(init_attr,
     674  struct mlx4_ib_qp_tunnel_init_attr, init_attr);
     723  if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
     726  err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp);
     [all …]

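Lines 673 and 674 recover a driver-private wrapper from the generic attribute pointer: mlx4 embeds struct ib_qp_init_attr inside mlx4_ib_qp_tunnel_init_attr and walks back to the outer struct with container_of(). A generic sketch of the idiom; the wrapper struct and its slave field below are stand-ins, not the real mlx4 layout:

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/* Hypothetical wrapper in the style of mlx4_ib_qp_tunnel_init_attr. */
struct tunnel_init_attr {
	struct ib_qp_init_attr init_attr;	/* must be embedded, not pointed to */
	int slave;				/* driver-private extras */
};

static int tunnel_slave(struct ib_qp_init_attr *init_attr)
{
	/* Only valid when the caller really passed a tunnel_init_attr. */
	struct tunnel_init_attr *t =
		container_of(init_attr, struct tunnel_init_attr, init_attr);

	return t->slave;
}
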
mad.c: all hits in create_pv_sqp():
    1777  qp_init_attr.init_attr.send_cq = ctx->cq;
    1778  qp_init_attr.init_attr.recv_cq = ctx->cq;
    1779  qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
    1780  qp_init_attr.init_attr.cap.max_send_wr = MLX4_NUM_TUNNEL_BUFS;
    1781  qp_init_attr.init_attr.cap.max_recv_wr = MLX4_NUM_TUNNEL_BUFS;
    1782  qp_init_attr.init_attr.cap.max_send_sge = 1;
    1783  qp_init_attr.init_attr.cap.max_recv_sge = 1;
    1785  qp_init_attr.init_attr.qp_type = IB_QPT_UD;
    1786  qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_TUNNEL_QP;
    1793  qp_init_attr.init_attr.qp_type = qp_type;
    [all …]

/drivers/infiniband/ulp/iser/

iser_verbs.c: all hits in iser_create_ib_conn_res(), where init_attr is a local (line 426):
     426  struct ib_qp_init_attr init_attr;
     435  memset(&init_attr, 0, sizeof init_attr);
     449  init_attr.event_handler = iser_qp_event_callback;
     450  init_attr.qp_context = (void *)ib_conn;
     451  init_attr.send_cq = ib_conn->comp->cq;
     452  init_attr.recv_cq = ib_conn->comp->cq;
     453  init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
     454  init_attr.cap.max_send_sge = 2;
     455  init_attr.cap.max_recv_sge = 1;
     456  init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
     [all …]

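Line 456 opts into selective signaling: with IB_SIGNAL_REQ_WR, only send WRs that carry IB_SEND_SIGNALED produce completions, which is how iser (and the nvme and srp initiators below) keep send-CQ traffic down. A minimal sketch of posting one signaled send under that policy; post_signaled_send() is a hypothetical helper:

#include <rdma/ib_verbs.h>

/* Sketch: request a CQE for this particular send only. */
static int post_signaled_send(struct ib_qp *qp, struct ib_sge *sge)
{
	struct ib_send_wr wr = {}, *bad_wr;

	wr.opcode	= IB_WR_SEND;
	wr.sg_list	= sge;
	wr.num_sge	= 1;
	wr.send_flags	= IB_SEND_SIGNALED;	/* only this WR completes to the CQ */

	return ib_post_send(qp, &wr, &bad_wr);
}
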
/drivers/infiniband/hw/mlx5/

qp.c:
     900  struct ib_qp_init_attr *init_attr,                            in create_kernel_qp() (argument)
     913  if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN |   in create_kernel_qp()
     919  if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)                in create_kernel_qp()
     931  err = calc_sq_size(dev, init_attr, qp);                       in create_kernel_qp()
     964  if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) { in create_kernel_qp()
    1333  struct ib_qp_init_attr *init_attr,                            in create_rss_raw_qp_tir() (argument)
    1351  if (init_attr->qp_type != IB_QPT_RAW_PACKET)                  in create_rss_raw_qp_tir()
    1354  if (init_attr->create_flags || init_attr->send_cq)            in create_rss_raw_qp_tir()
    1404  init_attr->rwq_ind_tbl->ind_tbl_num);                         in create_rss_raw_qp_tir()
    1431  if (!init_attr->rwq_ind_tbl->log_ind_tbl_size)                in create_rss_raw_qp_tir()
    [all …]

srq.c: all hits in mlx5_ib_create_srq(), where init_attr is an argument (line 241):
     241  struct ib_srq_init_attr *init_attr,
     253  if (init_attr->attr.max_wr >= max_srq_wqes) {
     255  init_attr->attr.max_wr,
     266  srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
     267  srq->msrq.max_gs = init_attr->attr.max_sge;
     283  in.type = init_attr->srq_type;
     300  if (init_attr->srq_type == IB_SRQT_XRC) {
     301  in.xrcd = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
     302  in.cqn = to_mcq(init_attr->ext.xrc.cq)->mcq.cqn;
     303  } else if (init_attr->srq_type == IB_SRQT_BASIC) {
     [all …]

gsi.c:
     116  struct ib_qp_init_attr *init_attr)                            in mlx5_ib_gsi_create_qp() (argument)
     120  struct ib_qp_init_attr hw_init_attr = *init_attr;             in mlx5_ib_gsi_create_qp()
     121  const u8 port_num = init_attr->port_num;                      in mlx5_ib_gsi_create_qp()
     145  gsi->outstanding_wrs = kcalloc(init_attr->cap.max_send_wr,    in mlx5_ib_gsi_create_qp()
     166  gsi->cap = init_attr->cap;                                    in mlx5_ib_gsi_create_qp()
     167  gsi->sq_sig_type = init_attr->sq_sig_type;                    in mlx5_ib_gsi_create_qp()
     171  gsi->cq = ib_alloc_cq(pd->device, gsi, init_attr->cap.max_send_wr, 0,  in mlx5_ib_gsi_create_qp()
     195  dev->devr.ports[init_attr->port_num - 1].gsi = gsi;           in mlx5_ib_gsi_create_qp()
     254  struct ib_qp_init_attr init_attr = {                          in create_gsi_ud_qp() (local)
     269  return ib_create_qp(pd, &init_attr);                          in create_gsi_ud_qp()

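Line 171 sizes the completion queue to the send depth with ib_alloc_cq(), the helper that ties a CQ to a polling context at allocation time. A minimal sketch; alloc_send_cq() is a hypothetical wrapper, and the IB_POLL_SOFTIRQ choice is an assumption for illustration:

#include <rdma/ib_verbs.h>

/* Sketch: one CQE per possible outstanding send WR.
 * ib_alloc_cq(device, private, nr_cqe, comp_vector, poll_ctx). */
static struct ib_cq *alloc_send_cq(struct ib_device *dev, void *priv,
				   u32 max_send_wr)
{
	return ib_alloc_cq(dev, priv, max_send_wr, 0, IB_POLL_SOFTIRQ);
}
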
mlx5_ib.h: function prototypes:
     747  struct ib_srq_init_attr *init_attr,
     756  struct ib_qp_init_attr *init_attr,
     844  struct ib_wq_init_attr *init_attr,
     850  struct ib_rwq_ind_table_init_attr *init_attr,
     901  struct ib_qp_init_attr *init_attr);

main.c: all hits in create_umr_res(), where init_attr is a local (line 2473):
    2473  struct ib_qp_init_attr *init_attr = NULL;
    2481  init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
    2482  if (!attr || !init_attr) {
    2501  init_attr->send_cq = cq;
    2502  init_attr->recv_cq = cq;
    2503  init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
    2504  init_attr->cap.max_send_wr = MAX_UMR_WR;
    2505  init_attr->cap.max_send_sge = 1;
    2506  init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
    2507  init_attr->port_num = 1;
    [all …]

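create_umr_res() heap-allocates both attribute structs and tests them with a single check (line 2482); since kfree(NULL) is a no-op, one shared exit path can free both unconditionally. A sketch of that pattern; create_res() is a simplified hypothetical wrapper, not the mlx5 function:

#include <linux/errno.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

/* Hypothetical wrapper showing the allocate-both, test-once pattern. */
static int create_res(void)
{
	struct ib_qp_attr *attr;
	struct ib_qp_init_attr *init_attr;
	int ret = 0;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	/* ... fill init_attr and create the QP resources here ... */

out:
	kfree(attr);		/* kfree(NULL) is a no-op */
	kfree(init_attr);
	return ret;
}
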
/drivers/infiniband/hw/hns/

hns_roce_qp.c:
     390  struct ib_qp_init_attr *init_attr,                                in hns_roce_create_qp_common() (argument)
     405  if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)                   in hns_roce_create_qp_common()
     410  ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,  in hns_roce_create_qp_common()
     411  !!init_attr->srq, hr_qp);                                         in hns_roce_create_qp_common()
     454  if (init_attr->create_flags &                                     in hns_roce_create_qp_common()
     461  if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {        in hns_roce_create_qp_common()
     468  ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,        in hns_roce_create_qp_common()
     526  if ((init_attr->qp_type) == IB_QPT_GSI) {                         in hns_roce_create_qp_common()
     571  struct ib_qp_init_attr *init_attr,                                in hns_roce_create_qp() (argument)
     580  switch (init_attr->qp_type) {                                     in hns_roce_create_qp()
     [all …]

/drivers/infiniband/hw/mthca/

mthca_provider.c:
     440  struct ib_srq_init_attr *init_attr,                           in mthca_create_srq() (argument)
     448  if (init_attr->srq_type != IB_SRQT_BASIC)                     in mthca_create_srq()
     475  &init_attr->attr, srq);                                       in mthca_create_srq()
     516  struct ib_qp_init_attr *init_attr,                            in mthca_create_qp() (argument)
     523  if (init_attr->create_flags)                                  in mthca_create_qp()
     526  switch (init_attr->qp_type) {                                 in mthca_create_qp()
     571  to_mcq(init_attr->send_cq),                                   in mthca_create_qp()
     572  to_mcq(init_attr->recv_cq),                                   in mthca_create_qp()
     573  init_attr->qp_type, init_attr->sq_sig_type,                   in mthca_create_qp()
     574  &init_attr->cap, qp);                                         in mthca_create_qp()
     [all …]

/drivers/infiniband/hw/i40iw/

i40iw_verbs.c: all hits in i40iw_create_qp(), where init_attr is an argument (line 578):
     578  struct ib_qp_init_attr *init_attr,
     605  if (init_attr->create_flags)
     607  if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
     608  init_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
     610  if (init_attr->cap.max_send_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
     611  init_attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
     615  sq_size = init_attr->cap.max_send_wr;
     616  rq_size = init_attr->cap.max_recv_wr;
     620  init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
     621  init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
     [all …]

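Unlike rvt_create_qp(), which rejects oversized requests outright, i40iw silently clamps them to the hardware limits (lines 607 to 611). The same clamps written with min_t(), as a sketch; clamp_qp_caps() is a hypothetical helper and the I40IW_* limits come from the driver's headers:

#include <linux/kernel.h>	/* min_t() */
#include <rdma/ib_verbs.h>

/* Sketch: clamp requested caps to device limits instead of failing. */
static void clamp_qp_caps(struct ib_qp_init_attr *init_attr)
{
	init_attr->cap.max_inline_data = min_t(u32,
			init_attr->cap.max_inline_data,
			I40IW_MAX_INLINE_DATA_SIZE);
	init_attr->cap.max_send_sge = min_t(u32,
			init_attr->cap.max_send_sge,
			I40IW_MAX_WQ_FRAGMENT_COUNT);
}
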
/drivers/infiniband/hw/usnic/

usnic_ib_verbs.c: all hits in usnic_ib_create_qp(), where init_attr is an argument (line 469):
     469  struct ib_qp_init_attr *init_attr,
     486  if (init_attr->create_flags)
     503  if (init_attr->qp_type != IB_QPT_UD) {
     505  us_ibdev->ib_dev.name, init_attr->qp_type);
     511  cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;

usnic_ib_verbs.h: function prototype:
      58  struct ib_qp_init_attr *init_attr,

/drivers/nvme/host/

rdma.c: all hits in nvme_rdma_create_qp(), where init_attr is a local (line 257):
     257  struct ib_qp_init_attr init_attr;
     260  memset(&init_attr, 0, sizeof(init_attr));
     261  init_attr.event_handler = nvme_rdma_qp_event;
     263  init_attr.cap.max_send_wr = factor * queue->queue_size + 1;
     265  init_attr.cap.max_recv_wr = queue->queue_size + 1;
     266  init_attr.cap.max_recv_sge = 1;
     267  init_attr.cap.max_send_sge = 1 + NVME_RDMA_MAX_INLINE_SEGMENTS;
     268  init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
     269  init_attr.qp_type = IB_QPT_RC;
     270  init_attr.send_cq = queue->ib_cq;
     [all …]

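The tail of nvme_rdma_create_qp() is elided above. For reference, a queue pair on an rdma_cm connection is typically created with rdma_create_qp() rather than raw ib_create_qp(); a minimal sketch of that call under that assumption, with create_cm_qp() as an illustrative wrapper:

#include <rdma/rdma_cm.h>

/* Sketch: create the QP on an existing cm_id.
 * rdma_create_qp() stores the QP in cm_id->qp on success. */
static int create_cm_qp(struct rdma_cm_id *cm_id, struct ib_pd *pd,
			struct ib_qp_init_attr *init_attr)
{
	return rdma_create_qp(cm_id, pd, init_attr);
}
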
/drivers/infiniband/hw/nes/

nes_verbs.c: all hits in nes_create_qp(), where init_attr is an argument (line 1010):
    1010  struct ib_qp_init_attr *init_attr, struct ib_udata *udata)
    1040  if (init_attr->create_flags)
    1044  switch (init_attr->qp_type) {
    1047  init_attr->cap.max_inline_data = 0;
    1049  init_attr->cap.max_inline_data = 64;
    1051  sq_size = init_attr->cap.max_send_wr;
    1052  rq_size = init_attr->cap.max_recv_wr;
    1064  init_attr->cap.max_send_wr = sq_size - 2;
    1065  init_attr->cap.max_recv_wr = rq_size - 1;
    1174  nescq = to_nescq(init_attr->send_cq);
    [all …]

/drivers/infiniband/ulp/srp/

ib_srp.c: all hits in srp_create_ch_ib(), where init_attr is a local (line 470):
     470  struct ib_qp_init_attr *init_attr;
     478  init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
     479  if (!init_attr)
     497  init_attr->event_handler = srp_qp_event;
     498  init_attr->cap.max_send_wr = m * target->queue_size;
     499  init_attr->cap.max_recv_wr = target->queue_size + 1;
     500  init_attr->cap.max_recv_sge = 1;
     501  init_attr->cap.max_send_sge = 1;
     502  init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
     503  init_attr->qp_type = IB_QPT_RC;
     [all …]

/drivers/infiniband/hw/hfi1/

qp.h: function prototype:
      92  struct ib_qp_init_attr *init_attr,