Lines matching refs: sqp
298 static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr, in store_attrs() argument
302 sqp->pkey_index = attr->pkey_index; in store_attrs()
304 sqp->qkey = attr->qkey; in store_attrs()
306 sqp->send_psn = attr->sq_psn; in store_attrs()
1351 struct mthca_sqp *sqp) in mthca_alloc_sqp() argument
1356 sqp->qp.transport = MLX; in mthca_alloc_sqp()
1357 err = mthca_set_qp_size(dev, cap, pd, &sqp->qp); in mthca_alloc_sqp()
1361 sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE; in mthca_alloc_sqp()
1362 sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size, in mthca_alloc_sqp()
1363 &sqp->header_dma, GFP_KERNEL); in mthca_alloc_sqp()
1364 if (!sqp->header_buf) in mthca_alloc_sqp()
1371 mthca_array_set(&dev->qp_table.qp, mqpn, sqp); in mthca_alloc_sqp()
1377 sqp->qp.port = port; in mthca_alloc_sqp()
1378 sqp->qp.qpn = mqpn; in mthca_alloc_sqp()
1379 sqp->qp.transport = MLX; in mthca_alloc_sqp()
1382 send_policy, &sqp->qp); in mthca_alloc_sqp()
1404 dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size, in mthca_alloc_sqp()
1405 sqp->header_buf, sqp->header_dma); in mthca_alloc_sqp()
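Lines 1351-1405 trace the special-QP setup path: force MLX transport, size the QP, allocate a DMA-coherent buffer with one UD-header slot per send WQE, register the QP under its MLX QPN, and unwind on failure. A sketch of how the fragments chain together; the parameters other than the final sqp argument, the MLX-QPN computation, the callee at line 1382, and the error label are assumptions inferred from how the listed statements use them, and the qp_table locking around the array update is omitted:

int mthca_alloc_sqp(struct mthca_dev *dev, struct mthca_pd *pd,
		    struct mthca_cq *send_cq, struct mthca_cq *recv_cq,
		    enum ib_sig_type send_policy, struct ib_qp_cap *cap,
		    int qpn, int port, struct mthca_sqp *sqp)
{
	/* MLX QPN derivation assumed; only its uses appear in the listing. */
	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
	int err;

	sqp->qp.transport = MLX;

	err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
	if (err)
		return err;

	/* One UD header slot per send WQE, allocated DMA-coherent so the CPU
	 * can build headers in place for the HCA to read. */
	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
					     &sqp->header_dma, GFP_KERNEL);
	if (!sqp->header_buf)
		return -ENOMEM;

	/* Publish the special QP under its MLX QPN (qp_table locking omitted). */
	mthca_array_set(&dev->qp_table.qp, mqpn, sqp);

	sqp->qp.port      = port;
	sqp->qp.qpn       = mqpn;
	sqp->qp.transport = MLX;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, &sqp->qp);	/* callee name assumed */
	if (err)
		goto err_out;

	return 0;

err_out:
	/* Unwind: drop the table entry (assumed) and free the header buffer
	 * exactly as lines 1404-1405 show. */
	mthca_array_clear(&dev->qp_table.qp, mqpn);
	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
			  sqp->header_buf, sqp->header_dma);
	return err;
}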
1478 static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, in build_mlx_header() argument
1489 &sqp->ud_header); in build_mlx_header()
1491 err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header); in build_mlx_header()
1495 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) | in build_mlx_header()
1496 (sqp->ud_header.lrh.destination_lid == in build_mlx_header()
1498 (sqp->ud_header.lrh.service_level << 8)); in build_mlx_header()
1499 mlx->rlid = sqp->ud_header.lrh.destination_lid; in build_mlx_header()
1504 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; in build_mlx_header()
1505 sqp->ud_header.immediate_present = 0; in build_mlx_header()
1508 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; in build_mlx_header()
1509 sqp->ud_header.immediate_present = 1; in build_mlx_header()
1510 sqp->ud_header.immediate_data = wr->ex.imm_data; in build_mlx_header()
1516 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; in build_mlx_header()
1517 if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) in build_mlx_header()
1518 sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; in build_mlx_header()
1519 sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); in build_mlx_header()
1520 if (!sqp->qp.ibqp.qp_num) in build_mlx_header()
1521 ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, in build_mlx_header()
1522 sqp->pkey_index, &pkey); in build_mlx_header()
1524 ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, in build_mlx_header()
1526 sqp->ud_header.bth.pkey = cpu_to_be16(pkey); in build_mlx_header()
1527 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); in build_mlx_header()
1528 sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); in build_mlx_header()
1529 sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ? in build_mlx_header()
1530 sqp->qkey : wr->wr.ud.remote_qkey); in build_mlx_header()
1531 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); in build_mlx_header()
1533 header_size = ib_ud_header_pack(&sqp->ud_header, in build_mlx_header()
1534 sqp->header_buf + in build_mlx_header()
1538 data->lkey = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey); in build_mlx_header()
1539 data->addr = cpu_to_be64(sqp->header_dma + in build_mlx_header()
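Lines 1478-1539 cover build_mlx_header(), which hand-builds the LRH/BTH/DETH for QP0/QP1 sends and points a gather entry at the pre-allocated header slot. A sketch assembled from those fragments; the mlx/data parameters and the ind WQE index in the signature tail, the header-init call closed by line 1489, the MTHCA_MLX_SLR flag, the opcode-selection shape, the QP1 P_Key-index source, the data->byte_count store, and the ind * MTHCA_UD_HEADER_SIZE offsets are assumptions, not taken from the listing:

static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
			    int ind, struct ib_send_wr *wr,
			    struct mthca_mlx_seg *mlx,	/* signature tail assumed */
			    struct mthca_data_seg *data)
{
	int header_size;
	u16 pkey;
	int err;

	/* (Header-init call from line 1489 omitted here.)
	 * Resolve the address handle into the cached UD header's LRH/GRH. */
	err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
	if (err)
		return err;

	/* MLX descriptor: force VL15 for QP0, flag a permissive DLID, carry the SL. */
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid ==
				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |	/* flag assumed */
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid = sqp->ud_header.lrh.destination_lid;

	/* Opcode and optional immediate, chosen from wr->opcode (selection assumed). */
	if (wr->opcode == IB_WR_SEND) {
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
	} else {
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->ex.imm_data;
	}

	/* BTH/DETH: QP0 uses VL15 and its cached P_Key index; the PSN is a
	 * 24-bit counter kept in sqp->send_psn. */
	sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   wr->wr.ud.pkey_index, &pkey);	/* QP1 index assumed */
	sqp->ud_header.bth.pkey            = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn   = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	/* Pack the header into this WQE's slot of the coherent buffer and point
	 * a gather entry at its bus address (per-WQE offsets assumed). */
	header_size = ib_ud_header_pack(&sqp->ud_header,
					sqp->header_buf + ind * MTHCA_UD_HEADER_SIZE);
	data->byte_count = cpu_to_be32(header_size);	/* assumed */
	data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
	data->addr       = cpu_to_be64(sqp->header_dma + ind * MTHCA_UD_HEADER_SIZE);

	return 0;
}

The 0x80000000 test at lines 1529-1530 reflects the InfiniBand rule that a remote Q_Key with the high bit set means "use the sender's own Q_Key", which is where the value cached by store_attrs() comes back into play.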