/drivers/infiniband/hw/cxgb3/
iwch_cq.c, in iwch_poll_cq_one():
     45  struct ib_wc *wc)  (argument)
     81  wc->wr_id = cookie;
     82  wc->qp = &qhp->ibqp;
     83  wc->vendor_err = CQE_STATUS(cqe);
     84  wc->wc_flags = 0;
     94  wc->byte_len = CQE_LEN(cqe);
     96  wc->byte_len = 0;
     97  wc->opcode = IB_WC_RECV;
    100  wc->ex.invalidate_rkey = CQE_WRID_STAG(cqe);
    101  wc->wc_flags |= IB_WC_WITH_INVALIDATE;
    [all …]
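The cxgb3 poll routine above fills a caller-supplied struct ib_wc; the matching consumer side drains the CQ through the generic verbs call ib_poll_cq(). A minimal consumer sketch, not taken from this tree (the helper name and log formats are illustrative):

    #include <rdma/ib_verbs.h>

    /* Drain a CQ one completion at a time; ib_poll_cq() returns how many
     * entries it wrote into "wc" (here 0 or 1). */
    static void my_drain_cq(struct ib_cq *cq)
    {
            struct ib_wc wc;

            while (ib_poll_cq(cq, 1, &wc) > 0) {
                    if (wc.status != IB_WC_SUCCESS) {
                            pr_warn("wc failed: wr_id 0x%llx status %d vendor_err 0x%x\n",
                                    (unsigned long long)wc.wr_id,
                                    wc.status, wc.vendor_err);
                            continue;
                    }
                    pr_info("wc ok: opcode %d byte_len %u\n",
                            wc.opcode, wc.byte_len);
            }
    }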
/drivers/infiniband/hw/qib/
qib_cq.c, in qib_cq_enter():
     50  struct qib_cq_wc *wc;  (local)
     61  wc = cq->queue;
     62  head = wc->head;
     68  if (unlikely(next == wc->tail)) {
     81  wc->uqueue[head].wr_id = entry->wr_id;
     82  wc->uqueue[head].status = entry->status;
     83  wc->uqueue[head].opcode = entry->opcode;
     84  wc->uqueue[head].vendor_err = entry->vendor_err;
     85  wc->uqueue[head].byte_len = entry->byte_len;
     86  wc->uqueue[head].ex.imm_data =
    [all …]
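qib_cq_enter() implements a single-producer ring (its ipath twin below is identical): the head index advances with wraparound, and line 68's test means "full", because one slot is always left empty to tell full apart from empty. A generic sketch of that convention, with illustrative names:

    #include <linux/types.h>

    /* Head/tail ring that keeps one slot unused:
     *   head == tail          -> empty
     *   advance(head) == tail -> full (the driver's next == wc->tail test) */
    static bool ring_full(u32 head, u32 tail, u32 nslots)
    {
            u32 next = head + 1;

            if (next >= nslots)
                    next = 0;       /* wrap around */
            return next == tail;
    }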
qib_ud.c, in qib_ud_loopback():
     58  struct ib_wc wc;  (local)
    123  memset(&wc, 0, sizeof wc);
    124  wc.byte_len = length + sizeof(struct ib_grh);
    127  wc.wc_flags = IB_WC_WITH_IMM;
    128  wc.ex.imm_data = swqe->wr.ex.imm_data;
    153  if (unlikely(wc.byte_len > qp->r_len)) {
    162  wc.wc_flags |= IB_WC_GRH;
    200  wc.wr_id = qp->r_wr_id;
    201  wc.status = IB_WC_SUCCESS;
    202  wc.opcode = IB_WC_RECV;
    [all …]
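The UD loopback path charges 40 bytes up front (line 124): on UD queue pairs the completion length counts the GRH slot at the head of the receive buffer as well as the payload, which is why the same value is later checked against qp->r_len and paired with the IB_WC_GRH flag. A hedged sketch of that accounting; the helper name and the error code choice are assumptions:

    #include <linux/errno.h>
    #include <rdma/ib_verbs.h>

    /* UD receive length accounting: byte_len = payload + 40-byte GRH slot. */
    static int my_ud_rx_len(struct ib_wc *wc, u32 payload, u32 r_len,
                            bool have_grh)
    {
            wc->byte_len = payload + sizeof(struct ib_grh);
            if (wc->byte_len > r_len)
                    return -EMSGSIZE;       /* receive buffer too small */
            if (have_grh)
                    wc->wc_flags |= IB_WC_GRH;      /* GRH really present */
            return 0;
    }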
qib_ruc.c
  in qib_init_sge():
     84  struct ib_wc wc;  (local)
    116  memset(&wc, 0, sizeof(wc));
    117  wc.wr_id = wqe->wr_id;
    118  wc.status = IB_WC_LOC_PROT_ERR;
    119  wc.opcode = IB_WC_RECV;
    120  wc.qp = &qp->ibqp;
    122  qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
  in qib_ruc_loopback():
    362  struct ib_wc wc;  (local)
    423  memset(&wc, 0, sizeof wc);
    433  wc.wc_flags = IB_WC_WITH_IMM;
    [all …]
qib_uc.c, in qib_uc_rcv():
    250  struct ib_wc wc;  (local)
    381  wc.ex.imm_data = ohdr->u.imm_data;
    383  wc.wc_flags = IB_WC_WITH_IMM;
    387  wc.ex.imm_data = 0;
    388  wc.wc_flags = 0;
    398  wc.byte_len = tlen + qp->r_rcv_len;
    399  if (unlikely(wc.byte_len > qp->r_len))
    401  wc.opcode = IB_WC_RECV;
    405  wc.wr_id = qp->r_wr_id;
    406  wc.status = IB_WC_SUCCESS;
    [all …]
qib_rc.c
  in qib_rc_send_complete():
    973  struct ib_wc wc;  (local)
   1020  memset(&wc, 0, sizeof wc);
   1021  wc.wr_id = wqe->wr.wr_id;
   1022  wc.status = IB_WC_SUCCESS;
   1023  wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
   1024  wc.byte_len = wqe->length;
   1025  wc.qp = &qp->ibqp;
   1026  qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
  in do_rc_completion():
   1058  struct ib_wc wc;  (local)
   1076  memset(&wc, 0, sizeof wc);
    [all …]
/drivers/infiniband/hw/ipath/
ipath_cq.c, in ipath_cq_enter():
     50  struct ipath_cq_wc *wc;  (local)
     61  wc = cq->queue;
     62  head = wc->head;
     68  if (unlikely(next == wc->tail)) {
     81  wc->uqueue[head].wr_id = entry->wr_id;
     82  wc->uqueue[head].status = entry->status;
     83  wc->uqueue[head].opcode = entry->opcode;
     84  wc->uqueue[head].vendor_err = entry->vendor_err;
     85  wc->uqueue[head].byte_len = entry->byte_len;
     86  wc->uqueue[head].ex.imm_data = (__u32 __force) entry->ex.imm_data;
    [all …]
ipath_ud.c, in ipath_ud_loopback():
     63  struct ib_wc wc;  (local)
     93  memset(&wc, 0, sizeof wc);
     94  wc.byte_len = length + sizeof(struct ib_grh);
     97  wc.wc_flags = IB_WC_WITH_IMM;
     98  wc.ex.imm_data = swqe->wr.ex.imm_data;
    141  if (wc.byte_len > rlen) {
    149  wc.wr_id = wqe->wr_id;
    181  wc.wc_flags |= IB_WC_GRH;
    213  wc.status = IB_WC_SUCCESS;
    214  wc.opcode = IB_WC_RECV;
    [all …]
ipath_ruc.c
  in ipath_init_sge():
    127  struct ib_wc wc;  (local)
    145  memset(&wc, 0, sizeof(wc));
    146  wc.wr_id = wqe->wr_id;
    147  wc.status = IB_WC_LOC_PROT_ERR;
    148  wc.opcode = IB_WC_RECV;
    149  wc.qp = &qp->ibqp;
    151  ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
  in ipath_ruc_loopback():
    267  struct ib_wc wc;  (local)
    325  memset(&wc, 0, sizeof wc);
    334  wc.wc_flags = IB_WC_WITH_IMM;
    [all …]
ipath_uc.c, in ipath_uc_rcv():
    248  struct ib_wc wc;  (local)
    285  memset(&wc, 0, sizeof wc);
    382  wc.ex.imm_data = *(__be32 *) data;
    386  wc.ex.imm_data = ohdr->u.imm_data;
    389  wc.wc_flags = IB_WC_WITH_IMM;
    404  wc.byte_len = tlen + qp->r_rcv_len;
    405  if (unlikely(wc.byte_len > qp->r_len)) {
    410  wc.opcode = IB_WC_RECV;
    413  wc.wr_id = qp->r_wr_id;
    414  wc.status = IB_WC_SUCCESS;
    [all …]
ipath_rc.c, in do_rc_ack():
    861  struct ib_wc wc;  (local)
    951  memset(&wc, 0, sizeof wc);
    952  wc.wr_id = wqe->wr.wr_id;
    953  wc.status = IB_WC_SUCCESS;
    954  wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
    955  wc.byte_len = wqe->length;
    956  wc.qp = &qp->ibqp;
    957  wc.src_qp = qp->remote_qpn;
    958  wc.slid = qp->remote_ah_attr.dlid;
    959  wc.sl = qp->remote_ah_attr.sl;
    [all …]
/drivers/infiniband/hw/mlx4/
cq.c, in mlx4_ib_handle_error_cqe():
    487  struct ib_wc *wc)  (argument)
    501  wc->status = IB_WC_LOC_LEN_ERR;
    504  wc->status = IB_WC_LOC_QP_OP_ERR;
    507  wc->status = IB_WC_LOC_PROT_ERR;
    510  wc->status = IB_WC_WR_FLUSH_ERR;
    513  wc->status = IB_WC_MW_BIND_ERR;
    516  wc->status = IB_WC_BAD_RESP_ERR;
    519  wc->status = IB_WC_LOC_ACCESS_ERR;
    522  wc->status = IB_WC_REM_INV_REQ_ERR;
    525  wc->status = IB_WC_REM_ACCESS_ERR;
    [all …]
mad.c
  in mlx4_ib_send_to_slave():
    454  enum ib_qp_type dest_qpt, struct ib_wc *wc,  (argument)
    492  ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
    543  tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
    544  tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
    545  tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
    546  tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;
  in mlx4_ib_demux_mad():
    576  struct ib_wc *wc, struct ib_grh *grh,  (argument)
    596  if (wc->wc_flags & IB_WC_GRH) {
    633  err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
  in mlx4_ib_multiplex_mad():
   1177  static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)  (argument)
    [all …]
/drivers/net/ethernet/brocade/bna/
bfa_cs.h
  in bfa_wc_up():
     94  bfa_wc_up(struct bfa_wc *wc)  (argument)
     96  wc->wc_count++;
  in bfa_wc_down():
    100  bfa_wc_down(struct bfa_wc *wc)  (argument)
    102  wc->wc_count--;
    103  if (wc->wc_count == 0)
    104  wc->wc_resume(wc->wc_cbarg);
  in bfa_wc_init():
    109  bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)  (argument)
    111  wc->wc_resume = wc_resume;
    112  wc->wc_cbarg = wc_cbarg;
    113  wc->wc_count = 0;
    [all …]
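Here wc is not an InfiniBand work completion at all: bfa_wc is Brocade's tiny "waiting counter". Callers take it up once per outstanding operation and down in each completion, and the resume callback fires when the count reaches zero; the same helper reappears under /drivers/scsi/bfa/ below with a struct bfa_wc_s spelling. A usage sketch built only from the three functions shown; the device type and the my_* helpers are hypothetical:

    struct my_dev {
            struct bfa_wc wc;       /* from bfa_cs.h */
            int nqueues;
    };

    static void my_dev_restart(struct my_dev *dev);        /* hypothetical */
    static void my_queue_stop(struct my_dev *dev, int i);  /* hypothetical;
                                                            * its completion
                                                            * calls bfa_wc_down() */

    static void my_all_stopped(void *cbarg)
    {
            struct my_dev *dev = cbarg;

            my_dev_restart(dev);    /* runs exactly once, at count == 0 */
    }

    static void my_stop_all_queues(struct my_dev *dev)
    {
            int i;

            bfa_wc_init(&dev->wc, my_all_stopped, dev);
            bfa_wc_up(&dev->wc);            /* guard while we post */
            for (i = 0; i < dev->nqueues; i++) {
                    bfa_wc_up(&dev->wc);
                    my_queue_stop(dev, i);
            }
            bfa_wc_down(&dev->wc);          /* drop guard; resume may fire now */
    }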
/drivers/infiniband/hw/ehca/
ehca_reqs.c, in ehca_poll_cq_one():
    625  static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)  (argument)
    705  wc->qp = &my_qp->ib_qp;
    743  wc->wr_id = replace_wr_id(cqe->work_request_id, qmap_entry->app_wr_id);
    758  wc->opcode = ib_wc_opcode[cqe->optype]-1;
    759  if (unlikely(wc->opcode == -1)) {
    773  map_ib_wc_status(cqe->status, &wc->status);
    774  wc->vendor_err = wc->status;
    776  wc->status = IB_WC_SUCCESS;
    778  wc->byte_len = cqe->nr_bytes_transferred;
    779  wc->pkey_index = cqe->pkey_index;
    [all …]
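The subtraction on line 758 works because ehca evidently stores its optype-to-opcode table offset by one: valid entries hold the IB opcode plus one, so an unpopulated slot (zero) decodes to -1 and is caught by the unlikely() test on the next line. A standalone sketch of the trick; the enum and table contents are made up:

    #include <rdma/ib_verbs.h>

    enum my_optype { MY_OPTYPE_SEND = 1, MY_OPTYPE_RECV = 2, MY_OPTYPE_MAX };

    /* Entries hold (opcode + 1); unlisted optypes stay 0 and decode to -1. */
    static const int my_wc_opcode[MY_OPTYPE_MAX] = {
            [MY_OPTYPE_SEND] = IB_WC_SEND + 1,
            [MY_OPTYPE_RECV] = IB_WC_RECV + 1,
    };

    static int my_decode_opcode(unsigned int optype)
    {
            if (optype >= MY_OPTYPE_MAX)
                    return -1;
            return my_wc_opcode[optype] - 1;        /* -1 means invalid */
    }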
/drivers/scsi/bfa/
bfa_cs.h
  in bfa_wc_up():
    293  bfa_wc_up(struct bfa_wc_s *wc)  (argument)
    295  wc->wc_count++;
  in bfa_wc_down():
    299  bfa_wc_down(struct bfa_wc_s *wc)  (argument)
    301  wc->wc_count--;
    302  if (wc->wc_count == 0)
    303  wc->wc_resume(wc->wc_cbarg);
  in bfa_wc_init():
    310  bfa_wc_init(struct bfa_wc_s *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)  (argument)
    312  wc->wc_resume = wc_resume;
    313  wc->wc_cbarg = wc_cbarg;
    314  wc->wc_count = 0;
    [all …]
/drivers/infiniband/hw/cxgb4/
cq.c, in c4iw_poll_cq_one():
    565  static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)  (argument)
    591  wc->wr_id = cookie;
    592  wc->qp = &qhp->ibqp;
    593  wc->vendor_err = CQE_STATUS(&cqe);
    594  wc->wc_flags = 0;
    603  wc->byte_len = CQE_LEN(&cqe);
    605  wc->byte_len = 0;
    606  wc->opcode = IB_WC_RECV;
    609  wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
    610  wc->wc_flags |= IB_WC_WITH_INVALIDATE;
    [all …]
/drivers/infiniband/ulp/ipoib/
ipoib_ib.c, in ipoib_ib_handle_rx_wc():
    228  static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)  (argument)
    231  unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
    237  wr_id, wc->status);
    247  if (unlikely(wc->status != IB_WC_SUCCESS)) {
    248  if (wc->status != IB_WC_WR_FLUSH_ERR)
    251  wc->status, wr_id, wc->vendor_err);
    262  if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
    278  wc->byte_len, wc->slid);
    281  ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);
    286  if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
    [all …]
ipoib_cm.c, in ipoib_cm_handle_rx_wc():
    556  void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)  (argument)
    560  unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
    570  wr_id, wc->status);
    585  p = wc->qp->qp_context;
    592  if (unlikely(wc->status != IB_WC_SUCCESS)) {
    595  wc->status, wr_id, wc->vendor_err);
    622  if (wc->byte_len < IPOIB_CM_COPYBREAK) {
    623  int dlen = wc->byte_len;
    639  frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
    657  wc->byte_len, wc->slid);
    [all …]
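The IPOIB_CM_COPYBREAK test on line 622 is the usual copybreak trade-off: below the threshold it is cheaper to memcpy the payload into a small, freshly allocated skb and repost the large receive buffer than to hand the whole buffer up the stack. A generic sketch of that pattern; the function name is illustrative:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/string.h>

    /* Copy a small receive into a right-sized skb so the big RX buffer can
     * be recycled immediately. */
    static struct sk_buff *my_copybreak(struct net_device *dev,
                                        const void *data, unsigned int len)
    {
            struct sk_buff *skb;

            skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
            if (!skb)
                    return NULL;
            skb_reserve(skb, NET_IP_ALIGN);         /* align the IP header */
            memcpy(skb_put(skb, len), data, len);
            return skb;
    }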
/drivers/infiniband/ulp/iser/
iser_verbs.c
  in iser_drain_tx_cq():
    855  struct ib_wc wc;  (local)
    860  while (ib_poll_cq(cq, 1, &wc) == 1) {
    861  tx_desc = (struct iser_tx_desc *) (unsigned long) wc.wr_id;
    862  ib_conn = wc.qp->qp_context;
    863  if (wc.status == IB_WC_SUCCESS) {
    864  if (wc.opcode == IB_WC_SEND)
    868  IB_WC_SEND, wc.opcode);
    871  wc.wr_id, wc.status, wc.vendor_err);
  in iser_cq_tasklet_fn():
    887  struct ib_wc wc;  (local)
    894  while (ib_poll_cq(cq, 1, &wc) == 1) {
    [all …]
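iser_cq_tasklet_fn() pairs this poll loop with re-arming the CQ for the next interrupt. The standard idiom closes the race between the last poll and the re-arm by asking ib_req_notify_cq() to report missed events and polling again when any slipped through. A sketch; the per-completion handler is a stand-in:

    #include <rdma/ib_verbs.h>

    static void my_process_wc(struct ib_wc *wc);    /* stand-in handler */

    static void my_poll_and_rearm(struct ib_cq *cq)
    {
            struct ib_wc wc;

            do {
                    while (ib_poll_cq(cq, 1, &wc) > 0)
                            my_process_wc(&wc);
                    /* > 0 means completions arrived since the last poll */
            } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                          IB_CQ_REPORT_MISSED_EVENTS) > 0);
    }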
/drivers/infiniband/core/
mad.c, in build_smp_wc():
    656  struct ib_wc *wc)  (argument)
    658  memset(wc, 0, sizeof *wc);
    659  wc->wr_id = wr_id;
    660  wc->status = IB_WC_SUCCESS;
    661  wc->opcode = IB_WC_RECV;
    662  wc->pkey_index = pkey_index;
    663  wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
    664  wc->src_qp = IB_QP0;
    665  wc->qp = qp;
    666  wc->slid = slid;
    [all …]
mad_rmpp.c
  in ack_recv():
    140  msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
    141  recv_wc->wc->pkey_index, 1, hdr_len,
  in alloc_response_msg():
    160  ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
    166  msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
    167  recv_wc->wc->pkey_index, 1,
  in create_rmpp_recv():
    291  mad_recv_wc->wc,
    314  rmpp_recv->src_qp = mad_recv_wc->wc->src_qp;
    315  rmpp_recv->slid = mad_recv_wc->wc->slid;
  in find_rmpp_recv():
    334  rmpp_recv->src_qp == mad_recv_wc->wc->src_qp &&
    335  rmpp_recv->slid == mad_recv_wc->wc->slid &&
    [all …]
agent.c, in agent_send_response():
     82  struct ib_wc *wc, struct ib_device *device,  (argument)
    102  ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
    109  send_buf = ib_create_send_mad(agent, wc->src_qp, wc->pkey_index, 0,
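agent_send_response() shows the canonical reply path for a received MAD: build an address handle straight from the receive completion, then allocate a send buffer aimed at the sender's queue pair and P_Key. A condensed sketch of the same sequence; the function name is illustrative and error handling is trimmed to the essentials:

    #include <linux/err.h>
    #include <rdma/ib_mad.h>
    #include <rdma/ib_verbs.h>

    static int my_send_reply(struct ib_mad_agent *agent, struct ib_wc *wc,
                             struct ib_grh *grh, u8 port_num)
    {
            struct ib_mad_send_buf *send_buf;
            struct ib_ah *ah;

            /* Reverse the path: the wc (plus GRH, if any) addresses the sender. */
            ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
            if (IS_ERR(ah))
                    return PTR_ERR(ah);

            send_buf = ib_create_send_mad(agent, wc->src_qp, wc->pkey_index,
                                          0 /* no RMPP */, IB_MGMT_MAD_HDR,
                                          IB_MGMT_MAD_DATA, GFP_KERNEL);
            if (IS_ERR(send_buf)) {
                    ib_destroy_ah(ah);
                    return PTR_ERR(send_buf);
            }
            send_buf->ah = ah;

            /* The reply MAD itself would be copied into send_buf->mad here. */
            return ib_post_send_mad(send_buf, NULL);
    }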
/drivers/net/wireless/hostap/
hostap_cs.c
  in hfa384x_outsw_debug():
    108  u8 *buf, int wc)  (argument)
    117  prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTSW, a, wc);
    118  outsw(dev->base_addr + a, buf, wc);
  in hfa384x_insw_debug():
    123  u8 *buf, int wc)  (argument)
    132  prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INSW, a, wc);
    133  insw(dev->base_addr + a, buf, wc);
  at file scope (macros):
    141  #define HFA384X_OUTSW(a, buf, wc) hfa384x_outsw_debug(dev, (a), (buf), (wc))  (argument)
    142  #define HFA384X_INSW(a, buf, wc) hfa384x_insw_debug(dev, (a), (buf), (wc))  (argument)
    150  #define HFA384X_INSW(a, buf, wc) insw(dev->base_addr + (a), buf, wc)  (argument)
    151  #define HFA384X_OUTSW(a, buf, wc) outsw(dev->base_addr + (a), buf, wc)  (argument)
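One more meaning shift: in the Prism2 drivers wc is a word count, the number of 16-bit words that insw()/outsw() move through an I/O port; it has nothing to do with work completions. Note also that the HFA384X_* macros pick up the local dev variable implicitly. A usage sketch; the register argument and helper name are hypothetical:

    /* Push "len" bytes to a data port as 16-bit words; wc is a word count,
     * so odd byte lengths round up. Assumes the surrounding file defines
     * HFA384X_OUTSW() and that "dev" is in scope, as in hostap_cs.c. */
    static void my_write_frame(struct net_device *dev, unsigned int data_reg,
                               u8 *buf, int len)
    {
            HFA384X_OUTSW(data_reg, buf, (len + 1) / 2);
    }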
hostap_plx.c
  in hfa384x_outsw_debug():
    174  u8 *buf, int wc)  (argument)
    184  prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTSW, a, wc);
    185  outsw(dev->base_addr + a, buf, wc);
  in hfa384x_insw_debug():
    190  u8 *buf, int wc)  (argument)
    200  prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INSW, a, wc);
    201  insw(dev->base_addr + a, buf, wc);
  at file scope (macros):
    209  #define HFA384X_OUTSW(a, buf, wc) hfa384x_outsw_debug(dev, (a), (buf), (wc))  (argument)
    210  #define HFA384X_INSW(a, buf, wc) hfa384x_insw_debug(dev, (a), (buf), (wc))  (argument)
    218  #define HFA384X_INSW(a, buf, wc) insw(dev->base_addr + (a), buf, wc)  (argument)
    219  #define HFA384X_OUTSW(a, buf, wc) outsw(dev->base_addr + (a), buf, wc)  (argument)