/drivers/infiniband/core/
iwcm.c
    75  struct iwcm_id_private *cm_id;  member
   133  list_add(&work->free_list, &work->cm_id->work_free_list);  in put_work()
   155  work->cm_id = cm_id_priv;  in alloc_work_entries()
   200  static void add_ref(struct iw_cm_id *cm_id)  in add_ref() argument
   203  cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);  in add_ref()
   207  static void rem_ref(struct iw_cm_id *cm_id)  in rem_ref() argument
   211  cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);  in rem_ref()
   216  static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
   283  int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)  in iw_cm_disconnect() argument
   290  cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);  in iw_cm_disconnect()
   [all …]
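The iwcm.c hits trace the core iWARP CM's ownership model: the public struct iw_cm_id is embedded in a private struct iwcm_id_private, and add_ref()/rem_ref() recover the private struct with container_of(). A minimal sketch of that pattern follows; the _sketch names and the kref field are illustrative stand-ins for the real refcount-and-waitqueue bookkeeping.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <rdma/iw_cm.h>

/* Illustrative stand-in for iwcm.c's private struct: the public id is
 * embedded, so the core can get back to its own state from the pointer
 * the provider hands it. */
struct iwcm_id_private_sketch {
	struct iw_cm_id id;
	struct kref ref;	/* simplification of the real refcounting */
};

static void add_ref_sketch(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private_sketch *cm_id_priv =
		container_of(cm_id, struct iwcm_id_private_sketch, id);

	/* providers only ever see &priv->id; recover priv, take a ref */
	kref_get(&cm_id_priv->ref);
}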
ucma.c
    89  struct rdma_cm_id *cm_id;  member
   118  struct rdma_cm_id *cm_id;  member
   135  else if (ctx->file != file || !ctx->cm_id)  in _ucma_find_context()
   166  rdma_destroy_id(uevent_close->cm_id);  in ucma_close_event_id()
   181  rdma_destroy_id(ctx->cm_id);  in ucma_close_id()
   284  static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)  in ucma_removal_event_handler() argument
   286  struct ucma_context *ctx = cm_id->context;  in ucma_removal_event_handler()
   299  if (ctx->cm_id == cm_id) {  in ucma_removal_event_handler()
   308  if (con_req_eve->cm_id == cm_id &&  in ucma_removal_event_handler()
   321  static int ucma_event_handler(struct rdma_cm_id *cm_id,  in ucma_event_handler() argument
   [all …]
cm.c
   861  static void cm_destroy_id(struct ib_cm_id *cm_id, int err)  in cm_destroy_id() argument
   866  cm_id_priv = container_of(cm_id, struct cm_id_private, id);  in cm_destroy_id()
   869  switch (cm_id->state) {  in cm_destroy_id()
   884  cm_id->state = IB_CM_IDLE;  in cm_destroy_id()
   901  ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,  in cm_destroy_id()
   913  ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,  in cm_destroy_id()
   925  ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,  in cm_destroy_id()
   932  ib_send_cm_dreq(cm_id, NULL, 0);  in cm_destroy_id()
   941  ib_send_cm_drep(cm_id, NULL, 0);  in cm_destroy_id()
   957  cm_free_id(cm_id->local_id);  in cm_destroy_id()
   [all …]
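The cm_destroy_id() hits outline the IB CM teardown rule: which message goes on the wire depends on how far the connection state machine got. A condensed sketch of that switch, assuming only the states visible above; the real function also covers listen, MRA, SIDR and timewait states and runs under the id's lock.

#include <rdma/ib_cm.h>

static void cm_destroy_sketch(struct ib_cm_id *cm_id)
{
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:		/* our REQ is outstanding: reject it */
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT, NULL, 0, NULL, 0);
		break;
	case IB_CM_REQ_RCVD:		/* peer's REQ never got an answer */
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:		/* connected: begin disconnect */
		ib_send_cm_dreq(cm_id, NULL, 0);
		break;
	case IB_CM_DREQ_RCVD:		/* peer is disconnecting: answer it */
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		break;
	}
}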
ucm.c
    84  struct ib_cm_id *cm_id;  member
    96  struct ib_cm_id *cm_id;  member
   169  ib_destroy_cm_id(uevent->cm_id);  in ib_ucm_cleanup_events()
   348  static int ib_ucm_event_handler(struct ib_cm_id *cm_id,  in ib_ucm_event_handler() argument
   355  ctx = cm_id->context;  in ib_ucm_event_handler()
   362  uevent->cm_id = cm_id;  in ib_ucm_event_handler()
   423  ctx->cm_id = uevent->cm_id;  in ib_ucm_event()
   424  ctx->cm_id->context = ctx;  in ib_ucm_event()
   492  ctx->cm_id = ib_create_cm_id(file->device->ib_dev,  in ib_ucm_create_id()
   494  if (IS_ERR(ctx->cm_id)) {  in ib_ucm_create_id()
   [all …]
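Lines 492 and 494 show the create half, and 423/424 the back-pointer wiring, of the ib_cm lifecycle that ucm.c exposes to userspace: the id and the context struct point at each other. A minimal sketch with illustrative names (my_ctx_sketch, my_handler_sketch):

#include <linux/err.h>
#include <rdma/ib_cm.h>

struct my_ctx_sketch {
	struct ib_cm_id *cm_id;
};

/* The context set at create time comes back on every event; a nonzero
 * return asks the CM core to destroy the id. */
static int my_handler_sketch(struct ib_cm_id *cm_id,
			     struct ib_cm_event *event)
{
	struct my_ctx_sketch *ctx = cm_id->context;

	(void)ctx;
	return 0;
}

static int my_create_sketch(struct ib_device *device,
			    struct my_ctx_sketch *ctx)
{
	ctx->cm_id = ib_create_cm_id(device, my_handler_sketch, ctx);
	if (IS_ERR(ctx->cm_id))
		return PTR_ERR(ctx->cm_id);
	return 0;
}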
cma.c
   294  } cm_id;  member
   965  if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))  in rdma_init_qp_attr()
   968  ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,  in rdma_init_qp_attr()
   974  if (!id_priv->cm_id.iw) {  in rdma_init_qp_attr()
   978  ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,  in rdma_init_qp_attr()
  1419  const struct ib_cm_id *cm_id,  in cma_find_listener() argument
  1431  if (id_priv->id.device == cm_id->device &&  in cma_find_listener()
  1437  if (id_priv_dev->id.device == cm_id->device &&  in cma_find_listener()
  1447  static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,  in cma_id_from_event() argument
  1477  id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);  in cma_id_from_event()
   [all …]
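The lone `} cm_id;` hit at line 294 is the closing brace of a union inside struct rdma_id_private: one slot holds either an IB or an iWARP CM id, and rdma_init_qp_attr() picks the member by transport, as the 965–978 hits show. A trimmed sketch; the struct and function names here are illustrative:

#include <linux/types.h>
#include <rdma/ib_cm.h>
#include <rdma/iw_cm.h>

struct id_private_sketch {
	union {
		struct ib_cm_id	*ib;	/* InfiniBand / RoCE ports */
		struct iw_cm_id	*iw;	/* iWARP ports */
	} cm_id;			/* only one member is ever valid */
};

static int init_qp_attr_sketch(struct id_private_sketch *id_priv,
			       bool is_iwarp, struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	if (is_iwarp)
		return iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
					  qp_attr_mask);
	return ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, qp_attr_mask);
}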
/drivers/infiniband/hw/nes/
nes_cm.c
   178  if (!cm_node->cm_id)  in create_event()
   193  event->cm_info.cm_id = cm_node->cm_id;  in create_event()
   778  struct iw_cm_id *cm_id = cm_node->cm_id;  in nes_retrans_expired() local
   789  if (cm_node->cm_id)  in nes_retrans_expired()
   790  cm_id->rem_ref(cm_id);  in nes_retrans_expired()
   803  struct iw_cm_id *cm_id = cm_node->cm_id;  in handle_recv_entry() local
   812  if (nesqp->cm_id) {  in handle_recv_entry()
   816  "to do!!!\n", nesqp->hwqp.qp_id, cm_id,  in handle_recv_entry()
   828  "to do!!!\n", nesqp->hwqp.qp_id, cm_id,  in handle_recv_entry()
   835  if (cm_node->cm_id)  in handle_recv_entry()
   [all …]
nes_cm.h
   298  struct iw_cm_id *cm_id;  member
   342  struct iw_cm_id *cm_id;  member
   361  struct iw_cm_id *cm_id;  member
/drivers/nvme/target/
rdma.c
    84  struct rdma_cm_id *cm_id;  member
   447  return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);  in nvmet_rdma_post_recv()
   481  rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,  in nvmet_rdma_release_rsp()
   482  queue->cm_id->port_num, rsp->req.sg,  in nvmet_rdma_release_rsp()
   528  struct rdma_cm_id *cm_id = rsp->queue->cm_id;  in nvmet_rdma_queue_response() local
   539  first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,  in nvmet_rdma_queue_response()
   540  cm_id->port_num, NULL, &rsp->send_wr);  in nvmet_rdma_queue_response()
   550  if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {  in nvmet_rdma_queue_response()
   564  rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,  in nvmet_rdma_read_data_done()
   565  queue->cm_id->port_num, rsp->req.sg,  in nvmet_rdma_read_data_done()
   [all …]
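Note that the NVMe target never caches a raw QP pointer: every post goes through queue->cm_id->qp, and the port number rides along on the same id, because rdma_create_qp() attaches the QP to the rdma_cm_id. A minimal sketch of that access pattern; queue_sketch is an illustrative stand-in for struct nvmet_rdma_queue:

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

struct queue_sketch {
	struct rdma_cm_id *cm_id;	/* owns the QP after rdma_create_qp() */
};

static int post_recv_sketch(struct queue_sketch *queue,
			    struct ib_recv_wr *wr)
{
	struct ib_recv_wr *bad_wr;

	/* same shape as the nvmet_rdma_post_recv() hit above */
	return ib_post_recv(queue->cm_id->qp, wr, &bad_wr);
}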
/drivers/infiniband/hw/i40iw/
i40iw_cm.c
   233  struct iw_cm_id *cm_id,  in i40iw_get_cmevent_info() argument
   236  memcpy(&event->local_addr, &cm_id->m_local_addr,  in i40iw_get_cmevent_info()
   238  memcpy(&event->remote_addr, &cm_id->m_remote_addr,  in i40iw_get_cmevent_info()
   256  struct iw_cm_id *cm_id,  in i40iw_send_cm_event() argument
   276  i40iw_get_cmevent_info(cm_node, cm_id, &event);  in i40iw_send_cm_event()
   290  return cm_id->event_handler(cm_id, &event);  in i40iw_send_cm_event()
   303  if (!cm_node->cm_id)  in i40iw_create_event()
   317  event->cm_info.cm_id = cm_node->cm_id;  in i40iw_create_event()
   617  struct iw_cm_id *cm_id;  in i40iw_event_connect_error() local
   619  cm_id = event->cm_node->cm_id;  in i40iw_event_connect_error()
   [all …]
i40iw_cm.h
   294  struct iw_cm_id *cm_id;  member
   336  struct iw_cm_id *cm_id;  member
   364  struct iw_cm_id *cm_id;  member
i40iw_verbs.h
   141  struct iw_cm_id *cm_id;  member
/drivers/infiniband/hw/cxgb3/
iwch_cm.h
   155  struct iw_cm_id *cm_id;  member
   196  static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id)  in to_ep() argument
   198  return cm_id->provider_data;  in to_ep()
   201  static inline struct iwch_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)  in to_listen_ep() argument
   203  return cm_id->provider_data;  in to_listen_ep()
   217  int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
   218  int iwch_create_listen(struct iw_cm_id *cm_id, int backlog);
   219  int iwch_destroy_listen(struct iw_cm_id *cm_id);
   220  int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
   221  int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
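to_ep() and to_listen_ep() work because struct iw_cm_id carries a provider_data pointer reserved for the driver. A sketch of the round trip; ep_sketch and the helper names are illustrative:

#include <rdma/iw_cm.h>

struct ep_sketch {
	struct iw_cm_id *cm_id;
};

static void bind_ep_sketch(struct iw_cm_id *cm_id, struct ep_sketch *ep)
{
	ep->cm_id = cm_id;
	cm_id->provider_data = ep;	/* what to_ep() later casts back */
}

static struct ep_sketch *to_ep_sketch(struct iw_cm_id *cm_id)
{
	return cm_id->provider_data;
}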
iwch_cm.c
   675  if (ep->com.cm_id) {  in close_complete_upcall()
   677  ep, ep->com.cm_id, ep->hwtid);  in close_complete_upcall()
   678  ep->com.cm_id->event_handler(ep->com.cm_id, &event);  in close_complete_upcall()
   679  ep->com.cm_id->rem_ref(ep->com.cm_id);  in close_complete_upcall()
   680  ep->com.cm_id = NULL;  in close_complete_upcall()
   692  if (ep->com.cm_id) {  in peer_close_upcall()
   694  ep, ep->com.cm_id, ep->hwtid);  in peer_close_upcall()
   695  ep->com.cm_id->event_handler(ep->com.cm_id, &event);  in peer_close_upcall()
   707  if (ep->com.cm_id) {  in peer_abort_upcall()
   709  ep->com.cm_id, ep->hwtid);  in peer_abort_upcall()
   [all …]
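The close_complete_upcall() hits show the shape every terminal upcall in this driver follows: deliver the event through the id's event_handler, drop the driver's reference with rem_ref(), and clear the pointer so no later path can fire a second upcall. A condensed sketch; com_sketch is illustrative, and the real code also fills in event status and debug state:

#include <linux/string.h>
#include <rdma/iw_cm.h>

struct com_sketch {
	struct iw_cm_id *cm_id;
};

static void close_complete_sketch(struct com_sketch *com)
{
	struct iw_cm_event event;

	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (com->cm_id) {
		com->cm_id->event_handler(com->cm_id, &event);
		com->cm_id->rem_ref(com->cm_id);  /* ref from connect/accept */
		com->cm_id = NULL;                /* final upcall for this id */
	}
}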
/drivers/infiniband/ulp/ipoib/
ipoib_cm.c
    78  static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
   275  struct ib_cm_id *cm_id, struct ib_qp *qp,  in ipoib_cm_modify_rx_qp() argument
   283  ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);  in ipoib_cm_modify_rx_qp()
   294  ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);  in ipoib_cm_modify_rx_qp()
   315  ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);  in ipoib_cm_modify_rx_qp()
   348  static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,  in ipoib_cm_nonsrq_init_rx() argument
   378  ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);  in ipoib_cm_nonsrq_init_rx()
   421  static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,  in ipoib_cm_send_rep() argument
   439  return ib_send_cm_rep(cm_id, &rep);  in ipoib_cm_send_rep()
   442  static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)  in ipoib_cm_req_handler() argument
   [all …]
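The three ib_cm_init_qp_attr() calls in ipoib_cm_modify_rx_qp() are the usual INIT → RTR → RTS march: for each target state the CM reports which QP attributes to set, and the caller applies them with ib_modify_qp(). A compressed sketch of that loop; the real function adjusts individual attributes between steps and unwinds on error:

#include <linux/kernel.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>

static int modify_rx_qp_sketch(struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	static const enum ib_qp_state states[] = {
		IB_QPS_INIT, IB_QPS_RTR, IB_QPS_RTS,
	};
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(states); i++) {
		qp_attr.qp_state = states[i];
		/* the CM knows which attributes this transition needs */
		ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
		if (ret)
			return ret;
		ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
		if (ret)
			return ret;
	}
	return 0;
}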
/drivers/infiniband/hw/cxgb4/
cm.c
   156  epc->cm_id->rem_ref(epc->cm_id);  in deref_cm_id()
   157  epc->cm_id = NULL;  in deref_cm_id()
   164  epc->cm_id->add_ref(epc->cm_id);  in ref_cm_id()
  1224  if (ep->com.cm_id) {  in close_complete_upcall()
  1226  ep, ep->com.cm_id, ep->hwtid);  in close_complete_upcall()
  1227  ep->com.cm_id->event_handler(ep->com.cm_id, &event);  in close_complete_upcall()
  1240  if (ep->com.cm_id) {  in peer_close_upcall()
  1242  ep, ep->com.cm_id, ep->hwtid);  in peer_close_upcall()
  1243  ep->com.cm_id->event_handler(ep->com.cm_id, &event);  in peer_close_upcall()
  1256  if (ep->com.cm_id) {  in peer_abort_upcall()
   [all …]
device.c
   234  &qp->ep->com.cm_id->local_addr;  in dump_qp()
   236  &qp->ep->com.cm_id->remote_addr;  in dump_qp()
   238  &qp->ep->com.cm_id->m_local_addr;  in dump_qp()
   240  &qp->ep->com.cm_id->m_remote_addr;  in dump_qp()
   256  &qp->ep->com.cm_id->local_addr;  in dump_qp()
   258  &qp->ep->com.cm_id->remote_addr;  in dump_qp()
   261  &qp->ep->com.cm_id->m_local_addr;  in dump_qp()
   264  &qp->ep->com.cm_id->m_remote_addr;  in dump_qp()
   537  &ep->com.cm_id->local_addr;  in dump_ep()
   539  &ep->com.cm_id->remote_addr;  in dump_ep()
   [all …]
iw_cxgb4.h
   833  struct iw_cm_id *cm_id;  member
   893  static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)  in to_ep() argument
   895  return cm_id->provider_data;  in to_ep()
   898  static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)  in to_listen_ep() argument
   900  return cm_id->provider_data;  in to_listen_ep()
   949  int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
   950  int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
   951  int c4iw_destroy_listen(struct iw_cm_id *cm_id);
   952  int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
   953  int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
/drivers/nvme/host/
rdma.c
   100  struct rdma_cm_id *cm_id;  member
   163  static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
   273  ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);  in nvme_rdma_create_qp()
   275  queue->qp = queue->cm_id->qp;  in nvme_rdma_create_qp()
   424  nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)  in nvme_rdma_find_get_device() argument
   430  if (ndev->dev->node_guid == cm_id->device->node_guid &&  in nvme_rdma_find_get_device()
   439  ndev->dev = cm_id->device;  in nvme_rdma_find_get_device()
   478  rdma_destroy_qp(queue->cm_id);  in nvme_rdma_destroy_queue_ib()
   558  queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,  in nvme_rdma_init_queue()
   560  if (IS_ERR(queue->cm_id)) {  in nvme_rdma_init_queue()
   [all …]
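Line 558 shows the first step of queue setup on the host side: create an rdma_cm_id bound to an event handler and a context, then later hang a QP off it with rdma_create_qp() (line 273). A minimal sketch, assuming RDMA_PS_TCP/IB_QPT_RC as in typical NVMe-oF usage; the _sketch names are illustrative:

#include <linux/err.h>
#include <net/net_namespace.h>
#include <rdma/rdma_cm.h>

struct nvme_queue_sketch {
	struct rdma_cm_id *cm_id;
};

static int cm_handler_sketch(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event)
{
	struct nvme_queue_sketch *queue = cm_id->context;

	(void)queue;
	return 0;	/* nonzero would make the CM destroy cm_id */
}

static int init_queue_sketch(struct nvme_queue_sketch *queue)
{
	queue->cm_id = rdma_create_id(&init_net, cm_handler_sketch, queue,
				      RDMA_PS_TCP, IB_QPT_RC);
	return PTR_ERR_OR_ZERO(queue->cm_id);
}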
/drivers/infiniband/ulp/isert/
ib_isert.c
    82  rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);  in isert_qp_event_callback()
   529  isert_conn->cm_id = cma_id;  in isert_connect_request()
   582  if (isert_conn->cm_id &&  in isert_connect_release()
   584  rdma_destroy_id(isert_conn->cm_id);  in isert_connect_release()
   646  struct isert_np *isert_np = isert_conn->cm_id->context;  in isert_handle_unbound_conn()
   684  err = rdma_disconnect(isert_conn->cm_id);  in isert_conn_terminate()
   699  isert_np->cm_id = NULL;  in isert_np_cma_handler()
   702  isert_np->cm_id = isert_setup_id(isert_np);  in isert_np_cma_handler()
   703  if (IS_ERR(isert_np->cm_id)) {  in isert_np_cma_handler()
   705  isert_np, PTR_ERR(isert_np->cm_id));  in isert_np_cma_handler()
   [all …]
ib_isert.h
   154  struct rdma_cm_id *cm_id;  member
   195  struct rdma_cm_id *cm_id;  member
/drivers/infiniband/ulp/srpt/
ib_srpt.c
   202  event->event, ch->cm_id, ch->sess_name, ch->state);  in srpt_qp_event()
   206  ib_cm_notify(ch->cm_id, event->event);  in srpt_qp_event()
  1025  ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);  in srpt_ch_qp_rtr()
  1055  ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);  in srpt_ch_qp_rts()
  1433  srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);  in srpt_handle_tsk_mgmt()
  1667  qp_init->cap.max_send_wr, ch->cm_id);  in srpt_create_ch_ib()
  1749  ret = ib_send_cm_dreq(ch->cm_id, NULL, 0);  in srpt_disconnect_ch()
  1751  ret = ib_send_cm_drep(ch->cm_id, NULL, 0);  in srpt_disconnect_ch()
  1804  ib_destroy_cm_id(ch->cm_id);  in srpt_release_channel_work()
  1829  static int srpt_cm_req_recv(struct ib_cm_id *cm_id,  in srpt_cm_req_recv() argument
   [all …]
ib_srpt.h
   265  struct ib_cm_id *cm_id;  member
   354  struct ib_cm_id *cm_id;  member
/drivers/infiniband/hw/mlx4/
cm.c
    75  static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)  in set_local_comm_id() argument
    80  msg->request_id = cpu_to_be32(cm_id);  in set_local_comm_id()
    86  msg->local_comm_id = cpu_to_be32(cm_id);  in set_local_comm_id()
   105  static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)  in set_remote_comm_id() argument
   110  msg->request_id = cpu_to_be32(cm_id);  in set_remote_comm_id()
   116  msg->remote_comm_id = cpu_to_be32(cm_id);  in set_remote_comm_id()
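In this file cm_id is not a pointer but the 32-bit communication ID inside a CM MAD, which the mlx4 SR-IOV multiplexer rewrites. The two assignments per helper exist because SIDR messages keep the ID in a request_id field while ordinary CM messages use local_comm_id/remote_comm_id; either way it is stored big-endian on the wire. A sketch of the non-SIDR case with a trimmed, illustrative message layout:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustrative layout: only the field we touch, not the full CM MAD. */
struct cm_generic_msg_sketch {
	__be32 local_comm_id;
};

static void set_local_comm_id_sketch(struct cm_generic_msg_sketch *msg,
				     u32 cm_id)
{
	msg->local_comm_id = cpu_to_be32(cm_id);	/* MADs are big-endian */
}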
/drivers/char/
mbcs.h
    79  union cm_id {  union
   145  uint64_t cm_id:2,  // 1:0  member
   342  union cm_id id;
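The mbcs.h hits are unrelated to RDMA: there cm_id is a 2-bit field (bits 1:0) of an SGI MBCS hardware register, exposed through a union that overlays the raw 64-bit register with bitfields. A sketch of the idiom; everything past the two id bits is collapsed into an illustrative reserved field. Bitfield ordering is compiler-defined, which is why such overlays stay in a driver-private header:

#include <linux/types.h>

union cm_id_sketch {
	uint64_t cm_id_reg;		/* raw 64-bit register image */
	struct {
		uint64_t cm_id:2,	/* bits 1:0 */
			 reserved:62;	/* illustrative; real layout differs */
	};
};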
/drivers/infiniband/ulp/srp/
ib_srp.c
   142  static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
   306  if (ch->cm_id)  in srp_new_cm_id()
   307  ib_destroy_cm_id(ch->cm_id);  in srp_new_cm_id()
   308  ch->cm_id = new_cm_id;  in srp_new_cm_id()
   586  if (ch->cm_id) {  in srp_free_ch_ib()
   587  ib_destroy_cm_id(ch->cm_id);  in srp_free_ch_ib()
   588  ch->cm_id = NULL;  in srp_free_ch_ib()
   780  status = ib_send_cm_req(ch->cm_id, &req->param);  in srp_send_req()
   814  if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {  in srp_disconnect_target()
  2298  static void srp_cm_rep_handler(struct ib_cm_id *cm_id,  in srp_cm_rep_handler() argument
   [all …]
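srp_new_cm_id() (hits 306–308) shows the replace-on-reconnect pattern: build the new id first and only then retire the old one, so a failed allocation leaves the channel usable. A minimal sketch; ch_sketch and the handler name are illustrative:

#include <linux/err.h>
#include <rdma/ib_cm.h>

struct ch_sketch {
	struct ib_cm_id *cm_id;
};

static int srp_handler_sketch(struct ib_cm_id *cm_id,
			      struct ib_cm_event *event)
{
	return 0;
}

static int new_cm_id_sketch(struct ib_device *dev, struct ch_sketch *ch)
{
	struct ib_cm_id *new_id = ib_create_cm_id(dev, srp_handler_sketch, ch);

	if (IS_ERR(new_id))
		return PTR_ERR(new_id);
	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);	/* retire the old id */
	ch->cm_id = new_id;
	return 0;
}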