Lines Matching refs:sqp

628 struct mlx4_ib_sqp *sqp; in create_qp_common() local
677 sqp = kzalloc(sizeof (struct mlx4_ib_sqp), gfp); in create_qp_common()
678 if (!sqp) in create_qp_common()
680 qp = &sqp->qp; in create_qp_common()
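Lines 628-680 show create_qp_common() allocating a struct mlx4_ib_sqp for a special QP and then working through the embedded qp field. The usual kernel idiom for such a layout is recovering the wrapper from the embedded member with container_of(). Below is a minimal userspace sketch of that layout; only the two struct names and the "qp = &sqp->qp" step come from the listing, while the fields and the to_msqp() helper are simplified assumptions, not the driver's real definitions.

/* Minimal userspace sketch of the "special QP embeds base QP" layout
 * suggested by lines 628-680 above.  Struct names are taken from the
 * listing; fields and the to_msqp() helper are assumptions. */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct mlx4_ib_qp  { int qpn; };
struct mlx4_ib_sqp {
	struct mlx4_ib_qp qp;        /* base QP embedded first */
	unsigned int pkey_index;
	unsigned int qkey;
	unsigned int send_psn;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Recover the wrapping sqp from a pointer to its embedded qp. */
static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
{
	return container_of(mqp, struct mlx4_ib_sqp, qp);
}

int main(void)
{
	struct mlx4_ib_sqp *sqp = calloc(1, sizeof(*sqp));
	if (!sqp)
		return 1;

	struct mlx4_ib_qp *qp = &sqp->qp;   /* mirrors "qp = &sqp->qp" at line 680 */
	qp->qpn = 0;                        /* e.g. QP0 */

	printf("sqp recovered from qp: %p == %p\n", (void *)to_msqp(qp), (void *)sqp);
	free(sqp);
	return 0;
}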
1226 static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr, in store_sqp_attrs() argument
1230 sqp->pkey_index = attr->pkey_index; in store_sqp_attrs()
1232 sqp->qkey = attr->qkey; in store_sqp_attrs()
1234 sqp->send_psn = attr->sq_psn; in store_sqp_attrs()
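Lines 1226-1234 show store_sqp_attrs() caching the P_Key index, Q_Key, and send PSN on the special QP. The odd-numbered lines between them (1229, 1231, 1233) are not part of the match, which suggests each store is guarded by an attr_mask check; the sketch below reconstructs the function under that assumption and is not verified against the source.

/* Hedged reconstruction of store_sqp_attrs() around lines 1226-1234; the
 * attr_mask guards are inferred from the gaps in the listing. */
static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
			    int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}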
1983 static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, in build_sriov_qp0_header() argument
1987 struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device); in build_sriov_qp0_header()
2009 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) in build_sriov_qp0_header()
2012 ib_ud_header_init(send_size, 1, 0, 0, 0, 0, &sqp->ud_header); in build_sriov_qp0_header()
2014 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) { in build_sriov_qp0_header()
2015 sqp->ud_header.lrh.service_level = in build_sriov_qp0_header()
2017 sqp->ud_header.lrh.destination_lid = in build_sriov_qp0_header()
2019 sqp->ud_header.lrh.source_lid = in build_sriov_qp0_header()
2027 mlx->rlid = sqp->ud_header.lrh.destination_lid; in build_sriov_qp0_header()
2029 sqp->ud_header.lrh.virtual_lane = 0; in build_sriov_qp0_header()
2030 sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); in build_sriov_qp0_header()
2031 ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey); in build_sriov_qp0_header()
2032 sqp->ud_header.bth.pkey = cpu_to_be16(pkey); in build_sriov_qp0_header()
2033 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) in build_sriov_qp0_header()
2034 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); in build_sriov_qp0_header()
2036 sqp->ud_header.bth.destination_qpn = in build_sriov_qp0_header()
2037 cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]); in build_sriov_qp0_header()
2039 sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); in build_sriov_qp0_header()
2041 if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey)) in build_sriov_qp0_header()
2044 if (vf_get_qp0_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey)) in build_sriov_qp0_header()
2047 sqp->ud_header.deth.qkey = cpu_to_be32(qkey); in build_sriov_qp0_header()
2048 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn); in build_sriov_qp0_header()
2050 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; in build_sriov_qp0_header()
2051 sqp->ud_header.immediate_present = 0; in build_sriov_qp0_header()
2053 header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf); in build_sriov_qp0_header()
2065 memcpy(inl + 1, sqp->header_buf, header_size); in build_sriov_qp0_header()
2069 memcpy(inl + 1, sqp->header_buf, spc); in build_sriov_qp0_header()
2072 memcpy(inl + 1, sqp->header_buf + spc, header_size - spc); in build_sriov_qp0_header()
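Lines 2053-2072 pack the UD header with ib_ud_header_pack() and copy it into the send WQE's inline data: when the packed header fits in the first chunk it is copied whole, otherwise the first spc bytes land in one inline segment and the remainder in the next. The self-contained sketch below illustrates only that split; the chunk size of 48 bytes and the segment bookkeeping are assumptions, and the real WQE layout has alignment and ownership details this ignores.

/* Sketch of the two-chunk inline copy visible at lines 2065-2072 (and again
 * at 2293-2300).  SPC and the destination buffers are assumptions. */
#include <stdio.h>
#include <string.h>

#define SPC 48   /* assumed capacity of the first inline chunk */

static void copy_inline(unsigned char *seg1, unsigned char *seg2,
			const unsigned char *header_buf, size_t header_size)
{
	if (header_size <= SPC) {
		memcpy(seg1, header_buf, header_size);             /* line 2065 analogue */
	} else {
		memcpy(seg1, header_buf, SPC);                     /* line 2069 analogue */
		memcpy(seg2, header_buf + SPC, header_size - SPC); /* line 2072 analogue */
	}
}

int main(void)
{
	unsigned char header[72], seg1[SPC], seg2[64];

	memset(header, 0xab, sizeof(header));
	copy_inline(seg1, seg2, header, sizeof(header));
	printf("tail byte copied: %#x\n", seg2[sizeof(header) - SPC - 1]);
	return 0;
}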
2106 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, in build_mlx_header() argument
2109 struct ib_device *ib_dev = sqp->qp.ibqp.device; in build_mlx_header()
2130 is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET; in build_mlx_header()
2155 ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header); in build_mlx_header()
2158 sqp->ud_header.lrh.service_level = in build_mlx_header()
2160 sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid; in build_mlx_header()
2161 sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f); in build_mlx_header()
2165 sqp->ud_header.grh.traffic_class = in build_mlx_header()
2167 sqp->ud_header.grh.flow_label = in build_mlx_header()
2169 sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit; in build_mlx_header()
2171 memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16); in build_mlx_header()
2177 sqp->ud_header.grh.source_gid.global.subnet_prefix = in build_mlx_header()
2178 to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. in build_mlx_header()
2180 sqp->ud_header.grh.source_gid.global.interface_id = in build_mlx_header()
2181 to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. in build_mlx_header()
2187 &sqp->ud_header.grh.source_gid); in build_mlx_header()
2189 memcpy(sqp->ud_header.grh.destination_gid.raw, in build_mlx_header()
2196 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) | in build_mlx_header()
2197 (sqp->ud_header.lrh.destination_lid == in build_mlx_header()
2199 (sqp->ud_header.lrh.service_level << 8)); in build_mlx_header()
2202 mlx->rlid = sqp->ud_header.lrh.destination_lid; in build_mlx_header()
2207 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; in build_mlx_header()
2208 sqp->ud_header.immediate_present = 0; in build_mlx_header()
2211 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; in build_mlx_header()
2212 sqp->ud_header.immediate_present = 1; in build_mlx_header()
2213 sqp->ud_header.immediate_data = wr->ex.imm_data; in build_mlx_header()
2226 memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6); in build_mlx_header()
2233 u64 mac = atomic64_read(&to_mdev(ib_dev)->iboe.mac[sqp->qp.port - 1]); in build_mlx_header()
2237 memcpy(sqp->ud_header.eth.smac_h, smac, ETH_ALEN); in build_mlx_header()
2240 memcpy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac, ETH_ALEN); in build_mlx_header()
2243 if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6)) in build_mlx_header()
2246 sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE); in build_mlx_header()
2248 sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE); in build_mlx_header()
2249 sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp); in build_mlx_header()
2252 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; in build_mlx_header()
2253 if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) in build_mlx_header()
2254 sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; in build_mlx_header()
2256 sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); in build_mlx_header()
2257 if (!sqp->qp.ibqp.qp_num) in build_mlx_header()
2258 ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey); in build_mlx_header()
2260 ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey); in build_mlx_header()
2261 sqp->ud_header.bth.pkey = cpu_to_be16(pkey); in build_mlx_header()
2262 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); in build_mlx_header()
2263 sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); in build_mlx_header()
2264 sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ? in build_mlx_header()
2265 sqp->qkey : wr->wr.ud.remote_qkey); in build_mlx_header()
2266 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); in build_mlx_header()
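Lines 2264-2265 select the DETH Q_Key: when the work request's remote Q_Key has its most significant bit set, the QP's own cached qkey is used instead, following the InfiniBand convention for "use the QP's Q_Key". A self-contained illustration, with names local to the example:

/* Illustration of the Q_Key selection at lines 2264-2265. */
#include <stdio.h>
#include <stdint.h>

static uint32_t select_qkey(uint32_t qp_qkey, uint32_t wr_remote_qkey)
{
	return (wr_remote_qkey & 0x80000000u) ? qp_qkey : wr_remote_qkey;
}

int main(void)
{
	printf("%#x\n", select_qkey(0x11111111u, 0x80000000u)); /* MSB set -> QP's own qkey */
	printf("%#x\n", select_qkey(0x11111111u, 0x22222222u)); /* otherwise -> WR qkey     */
	return 0;
}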
2268 header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf); in build_mlx_header()
2276 be32_to_cpu(((__be32 *) sqp->header_buf)[i])); in build_mlx_header()
2293 memcpy(inl + 1, sqp->header_buf, header_size); in build_mlx_header()
2297 memcpy(inl + 1, sqp->header_buf, spc); in build_mlx_header()
2300 memcpy(inl + 1, sqp->header_buf + spc, header_size - spc); in build_mlx_header()
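Finally, lines 2039 and 2263 both advance a software send_psn counter and mask it to 24 bits before placing it in the BTH, since the packet sequence number field is only 24 bits wide. A self-contained illustration of that wrap:

/* Illustration of the 24-bit PSN mask used at lines 2039 and 2263. */
#include <stdio.h>
#include <stdint.h>

static uint32_t next_psn(uint32_t *counter)
{
	return (*counter)++ & ((1u << 24) - 1);
}

int main(void)
{
	uint32_t send_psn = 0xFFFFFE;   /* two packets away from the 24-bit wrap */

	for (int i = 0; i < 4; i++)
		printf("psn = %#08x\n", next_psn(&send_psn));
	return 0;
}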