/drivers/staging/rdma/amso1100/ |
D | c2_cm.c |
      41  int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)  in c2_llp_connect() argument
      43  struct c2_dev *c2dev = to_c2dev(cm_id->device);  in c2_llp_connect()
      49  struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;  in c2_llp_connect()
      51  if (cm_id->remote_addr.ss_family != AF_INET)  in c2_llp_connect()
      54  ibqp = c2_get_qp(cm_id->device, iw_param->qpn);  in c2_llp_connect()
      60  cm_id->provider_data = qp;  in c2_llp_connect()
      61  cm_id->add_ref(cm_id);  in c2_llp_connect()
      62  qp->cm_id = cm_id;  in c2_llp_connect()
     128  cm_id->provider_data = NULL;  in c2_llp_connect()
     129  qp->cm_id = NULL;  in c2_llp_connect()
     [all …]
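A minimal sketch, not the amso1100 code itself, of the connect-time linkage the c2_llp_connect() hits show: the provider stores the QP in cm_id->provider_data, takes a reference with cm_id->add_ref(), and records the cm_id on the QP so later events can find it, undoing all three on failure. my_qp, my_get_qp() and my_post_connect() are assumed placeholder names.

#include <linux/errno.h>
#include <linux/in.h>
#include <rdma/iw_cm.h>

struct my_qp {
	struct iw_cm_id *cm_id;
	/* ... hardware queue state ... */
};

/* Assumed helpers standing in for the provider's own QP lookup and wire I/O. */
struct my_qp *my_get_qp(struct ib_device *dev, u32 qpn);
int my_post_connect(struct my_qp *qp, struct sockaddr_in *raddr);

static int my_llp_connect(struct iw_cm_id *cm_id,
			  struct iw_cm_conn_param *iw_param)
{
	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
	struct my_qp *qp;
	int err;

	if (cm_id->remote_addr.ss_family != AF_INET)
		return -ENOSYS;

	qp = my_get_qp(cm_id->device, iw_param->qpn);
	if (!qp)
		return -EINVAL;

	/* Link cm_id and QP both ways; the reference keeps the cm_id alive
	 * until the provider drops it again with rem_ref(). */
	cm_id->provider_data = qp;
	cm_id->add_ref(cm_id);
	qp->cm_id = cm_id;

	err = my_post_connect(qp, raddr);
	if (err) {
		/* Failure path mirrors lines 128-129 above: undo the linkage. */
		cm_id->provider_data = NULL;
		qp->cm_id = NULL;
		cm_id->rem_ref(cm_id);
	}
	return err;
}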
|
D | c2_ae.c |
     187  struct iw_cm_id *cm_id = qp->cm_id;  in c2_ae_event() local
     190  if (!cm_id) {  in c2_ae_event()
     221  if (qp->cm_id) {  in c2_ae_event()
     222  qp->cm_id->rem_ref(qp->cm_id);  in c2_ae_event()
     223  qp->cm_id = NULL;  in c2_ae_event()
     229  if (cm_id->event_handler)  in c2_ae_event()
     230  cm_id->event_handler(cm_id, &cm_event);  in c2_ae_event()
     247  BUG_ON(cm_id->event_handler==(void*)0x6b6b6b6b);  in c2_ae_event()
     250  if (qp->cm_id) {  in c2_ae_event()
     251  qp->cm_id->rem_ref(qp->cm_id);  in c2_ae_event()
     [all …]
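The c2_ae.c hits show the other half of that lifetime: on an asynchronous close or error event the provider breaks the QP-to-cm_id link, delivers the event through cm_id->event_handler(), and drops the reference taken at connect time. A simplified illustration (my_qp is again a placeholder, and real drivers do this under a lock):

#include <rdma/iw_cm.h>

struct my_qp {
	struct iw_cm_id *cm_id;
};

static void my_report_close(struct my_qp *qp)
{
	struct iw_cm_id *cm_id = qp->cm_id;
	struct iw_cm_event cm_event = {
		.event  = IW_CM_EVENT_CLOSE,
		.status = 0,
	};

	if (!cm_id)
		return;			/* nobody is listening on this QP */

	qp->cm_id = NULL;		/* report the close exactly once */

	if (cm_id->event_handler)
		cm_id->event_handler(cm_id, &cm_event);

	cm_id->rem_ref(cm_id);		/* balance the connect-time add_ref() */
}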
|
D | c2_qp.c |
     174  if (qp->cm_id && qp->state == IB_QPS_RTS) {  in c2_qp_modify()
     176  "qp=%p, cm_id=%p\n",qp,qp->cm_id);  in c2_qp_modify()
     178  vq_req->cm_id = qp->cm_id;  in c2_qp_modify()
     236  if (vq_req->event==IW_CM_EVENT_CLOSE && qp->cm_id) {  in c2_qp_modify()
     237  qp->cm_id->rem_ref(qp->cm_id);  in c2_qp_modify()
     238  qp->cm_id = NULL;  in c2_qp_modify()
     332  if (qp->cm_id && qp->state == IB_QPS_RTS) {  in destroy_qp()
     334  "qp=%p, cm_id=%p\n",qp,qp->cm_id);  in destroy_qp()
     337  vq_req->cm_id = qp->cm_id;  in destroy_qp()
     369  if (qp->cm_id) {  in destroy_qp()
     [all …]
|
D | c2_intr.c |
     200  cm_event.local_addr = req->cm_id->local_addr;  in handle_vq()
     201  cm_event.remote_addr = req->cm_id->remote_addr;  in handle_vq()
     204  req->cm_id->event_handler(req->cm_id, &cm_event);  in handle_vq()
|
D | c2_provider.c |
     605  static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)  in c2_connect() argument
     610  return c2_llp_connect(cm_id, iw_param);  in c2_connect()
     613  static int c2_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)  in c2_accept() argument
     618  return c2_llp_accept(cm_id, iw_param);  in c2_accept()
     621  static int c2_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)  in c2_reject() argument
     627  err = c2_llp_reject(cm_id, pdata, pdata_len);  in c2_reject()
     631  static int c2_service_create(struct iw_cm_id *cm_id, int backlog)  in c2_service_create() argument
     636  err = c2_llp_service_create(cm_id, backlog);  in c2_service_create()
     643  static int c2_service_destroy(struct iw_cm_id *cm_id)  in c2_service_destroy() argument
     648  err = c2_llp_service_destroy(cm_id);  in c2_service_destroy()
|
D | c2.h |
     521  extern int c2_llp_connect(struct iw_cm_id *cm_id,
     523  extern int c2_llp_accept(struct iw_cm_id *cm_id,
     525  extern int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata,
     527  extern int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog);
     528  extern int c2_llp_service_destroy(struct iw_cm_id *cm_id);
|
/drivers/infiniband/core/ |
D | iwcm.c |
      63  struct iwcm_id_private *cm_id;  member
     121  list_add(&work->free_list, &work->cm_id->work_free_list);  in put_work()
     145  work->cm_id = cm_id_priv;  in alloc_work_entries()
     191  static void add_ref(struct iw_cm_id *cm_id)  in add_ref() argument
     194  cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);  in add_ref()
     198  static void rem_ref(struct iw_cm_id *cm_id)  in rem_ref() argument
     203  cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);  in rem_ref()
     216  static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
     283  int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)  in iw_cm_disconnect() argument
     290  cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);  in iw_cm_disconnect()
     [all …]
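The iwcm.c hits show how the core implements add_ref()/rem_ref(): every iw_cm_id is embedded in a private structure that container_of() recovers. The real struct iwcm_id_private is internal to iwcm.c and carries its own refcounting, work entries and wait queues; the kref-based version below is only a simplified reconstruction of the idea, with my_* names standing in.

#include <linux/kref.h>
#include <linux/slab.h>
#include <rdma/iw_cm.h>

struct my_iwcm_id_private {
	struct iw_cm_id id;	/* embedded so container_of() can recover us */
	struct kref ref;
	/* ... work entries, wait queue, flags in the real structure ... */
};

static void my_iwcm_release(struct kref *kref)
{
	kfree(container_of(kref, struct my_iwcm_id_private, ref));
}

static void my_add_ref(struct iw_cm_id *cm_id)
{
	struct my_iwcm_id_private *priv =
		container_of(cm_id, struct my_iwcm_id_private, id);

	kref_get(&priv->ref);
}

static void my_rem_ref(struct iw_cm_id *cm_id)
{
	struct my_iwcm_id_private *priv =
		container_of(cm_id, struct my_iwcm_id_private, id);

	kref_put(&priv->ref, my_iwcm_release);
}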
|
D | ucma.c |
      91  struct rdma_cm_id *cm_id;  member
     119  struct rdma_cm_id *cm_id;  member
     138  else if (ctx->file != file || !ctx->cm_id)  in _ucma_find_context()
     169  rdma_destroy_id(uevent_close->cm_id);  in ucma_close_event_id()
     184  rdma_destroy_id(ctx->cm_id);  in ucma_close_id()
     287  static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)  in ucma_removal_event_handler() argument
     289  struct ucma_context *ctx = cm_id->context;  in ucma_removal_event_handler()
     302  if (ctx->cm_id == cm_id) {  in ucma_removal_event_handler()
     311  if (con_req_eve->cm_id == cm_id &&  in ucma_removal_event_handler()
     324  static int ucma_event_handler(struct rdma_cm_id *cm_id,  in ucma_event_handler() argument
     [all …]
|
D | cm.c |
     861  static void cm_destroy_id(struct ib_cm_id *cm_id, int err)  in cm_destroy_id() argument
     866  cm_id_priv = container_of(cm_id, struct cm_id_private, id);  in cm_destroy_id()
     869  switch (cm_id->state) {  in cm_destroy_id()
     884  cm_id->state = IB_CM_IDLE;  in cm_destroy_id()
     901  ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,  in cm_destroy_id()
     913  ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,  in cm_destroy_id()
     925  ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,  in cm_destroy_id()
     932  ib_send_cm_dreq(cm_id, NULL, 0);  in cm_destroy_id()
     941  ib_send_cm_drep(cm_id, NULL, 0);  in cm_destroy_id()
     957  cm_free_id(cm_id->local_id);  in cm_destroy_id()
     [all …]
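The cm_destroy_id() hits point at a state machine: which CM message is sent on destroy depends on how far the connection got. A heavily simplified illustration of that dispatch; the real function also handles listen, MRA, SIDR and timewait states, runs under the id's lock, and attaches ARI/private data to some rejects.

#include <rdma/ib_cm.h>

static void my_cm_destroy_dispatch(struct ib_cm_id *cm_id)
{
	/* Roughly what cm_destroy_id() sends before releasing the id. */
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:
		/* Our REQ is outstanding and unanswered: reject it ourselves. */
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT, NULL, 0, NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
	case IB_CM_REP_RCVD:
		/* The consumer gave up before accepting the connection. */
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		/* Connected: start the normal DREQ/DREP disconnect handshake. */
		ib_send_cm_dreq(cm_id, NULL, 0);
		break;
	case IB_CM_DREQ_RCVD:
		/* The peer already asked to disconnect: answer it. */
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		break;
	}
	/* ...after which the id is taken off the lists and freed (cm_free_id). */
}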
|
D | ucm.c |
      86  struct ib_cm_id *cm_id;  member
      98  struct ib_cm_id *cm_id;  member
     171  ib_destroy_cm_id(uevent->cm_id);  in ib_ucm_cleanup_events()
     350  static int ib_ucm_event_handler(struct ib_cm_id *cm_id,  in ib_ucm_event_handler() argument
     357  ctx = cm_id->context;  in ib_ucm_event_handler()
     364  uevent->cm_id = cm_id;  in ib_ucm_event_handler()
     425  ctx->cm_id = uevent->cm_id;  in ib_ucm_event()
     426  ctx->cm_id->context = ctx;  in ib_ucm_event()
     494  ctx->cm_id = ib_create_cm_id(file->device->ib_dev,  in ib_ucm_create_id()
     496  if (IS_ERR(ctx->cm_id)) {  in ib_ucm_create_id()
     [all …]
|
D | cma.c |
     220  } cm_id;  member
     848  if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))  in rdma_init_qp_attr()
     851  ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,  in rdma_init_qp_attr()
     857  if (!id_priv->cm_id.iw) {  in rdma_init_qp_attr()
     861  ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,  in rdma_init_qp_attr()
    1299  const struct ib_cm_id *cm_id,  in cma_find_listener() argument
    1311  if (id_priv->id.device == cm_id->device &&  in cma_find_listener()
    1317  if (id_priv_dev->id.device == cm_id->device &&  in cma_find_listener()
    1327  static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,  in cma_id_from_event() argument
    1357  id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);  in cma_id_from_event()
    [all …]
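In cma.c the cm_id member is a union because an rdma_cm_id can be backed either by an InfiniBand ib_cm_id or by an iWARP iw_cm_id, and calls such as QP-attribute setup are dispatched to whichever CM is in use. A cut-down illustration of that dispatch; the real struct rdma_id_private is private to cma.c and selects the transport with the rdma_cap_*() helpers rather than a flag.

#include <linux/errno.h>
#include <linux/types.h>
#include <rdma/ib_cm.h>
#include <rdma/iw_cm.h>

struct my_id_private {
	union {
		struct ib_cm_id *ib;
		struct iw_cm_id *iw;
	} cm_id;
	bool is_iwarp;		/* illustrative stand-in for the transport check */
};

static int my_init_qp_attr(struct my_id_private *id_priv,
			   struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	if (id_priv->is_iwarp) {
		if (!id_priv->cm_id.iw)
			return -EINVAL;
		return iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
					  qp_attr_mask);
	}

	if (!id_priv->cm_id.ib)
		return -EINVAL;
	return ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, qp_attr_mask);
}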
|
/drivers/infiniband/hw/nes/ |
D | nes_cm.c |
     178  if (!cm_node->cm_id)  in create_event()
     193  event->cm_info.cm_id = cm_node->cm_id;  in create_event()
     897  struct iw_cm_id *cm_id = cm_node->cm_id;  in nes_retrans_expired() local
     908  if (cm_node->cm_id)  in nes_retrans_expired()
     909  cm_id->rem_ref(cm_id);  in nes_retrans_expired()
     922  struct iw_cm_id *cm_id = cm_node->cm_id;  in handle_recv_entry() local
     931  if (nesqp->cm_id) {  in handle_recv_entry()
     935  "to do!!!\n", nesqp->hwqp.qp_id, cm_id,  in handle_recv_entry()
     947  "to do!!!\n", nesqp->hwqp.qp_id, cm_id,  in handle_recv_entry()
     954  if (cm_node->cm_id)  in handle_recv_entry()
     [all …]
|
D | nes_cm.h |
     298  struct iw_cm_id *cm_id;  member
     344  struct iw_cm_id *cm_id;  member
     363  struct iw_cm_id *cm_id;  member
|
/drivers/infiniband/hw/cxgb3/ |
D | iwch_cm.h |
     155  struct iw_cm_id *cm_id;  member
     196  static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id)  in to_ep() argument
     198  return cm_id->provider_data;  in to_ep()
     201  static inline struct iwch_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)  in to_listen_ep() argument
     203  return cm_id->provider_data;  in to_listen_ep()
     217  int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
     218  int iwch_create_listen(struct iw_cm_id *cm_id, int backlog);
     219  int iwch_destroy_listen(struct iw_cm_id *cm_id);
     220  int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
     221  int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
|
D | iwch_cm.c |
     675  if (ep->com.cm_id) {  in close_complete_upcall()
     677  ep, ep->com.cm_id, ep->hwtid);  in close_complete_upcall()
     678  ep->com.cm_id->event_handler(ep->com.cm_id, &event);  in close_complete_upcall()
     679  ep->com.cm_id->rem_ref(ep->com.cm_id);  in close_complete_upcall()
     680  ep->com.cm_id = NULL;  in close_complete_upcall()
     692  if (ep->com.cm_id) {  in peer_close_upcall()
     694  ep, ep->com.cm_id, ep->hwtid);  in peer_close_upcall()
     695  ep->com.cm_id->event_handler(ep->com.cm_id, &event);  in peer_close_upcall()
     707  if (ep->com.cm_id) {  in peer_abort_upcall()
     709  ep->com.cm_id, ep->hwtid);  in peer_abort_upcall()
     [all …]
|
/drivers/infiniband/hw/cxgb4/ |
D | cm.c |
    1160  if (ep->com.cm_id) {  in close_complete_upcall()
    1162  ep, ep->com.cm_id, ep->hwtid);  in close_complete_upcall()
    1163  ep->com.cm_id->event_handler(ep->com.cm_id, &event);  in close_complete_upcall()
    1164  ep->com.cm_id->rem_ref(ep->com.cm_id);  in close_complete_upcall()
    1165  ep->com.cm_id = NULL;  in close_complete_upcall()
    1185  if (ep->com.cm_id) {  in peer_close_upcall()
    1187  ep, ep->com.cm_id, ep->hwtid);  in peer_close_upcall()
    1188  ep->com.cm_id->event_handler(ep->com.cm_id, &event);  in peer_close_upcall()
    1201  if (ep->com.cm_id) {  in peer_abort_upcall()
    1203  ep->com.cm_id, ep->hwtid);  in peer_abort_upcall()
    [all …]
|
D | iw_cxgb4.h |
     785  struct iw_cm_id *cm_id;  member
     885  static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)  in to_ep() argument
     887  return cm_id->provider_data;  in to_ep()
     890  static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)  in to_listen_ep() argument
     892  return cm_id->provider_data;  in to_listen_ep()
     952  int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
     953  int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
     954  int c4iw_destroy_listen(struct iw_cm_id *cm_id);
     955  int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
     956  int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
|
/drivers/infiniband/ulp/srpt/ |
D | ib_srpt.c |
     227  event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch));  in srpt_qp_event()
     231  ib_cm_notify(ch->cm_id, event->event);  in srpt_qp_event()
     990  ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);  in srpt_ch_qp_rtr()
    1020  ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);  in srpt_ch_qp_rts()
    1807  srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);  in srpt_handle_tsk_mgmt()
    2094  qp_init->cap.max_send_wr, ch->cm_id);  in srpt_create_ch_ib()
    2159  ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0,  in __srpt_close_ch()
    2163  if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)  in __srpt_close_ch()
    2220  static void srpt_drain_channel(struct ib_cm_id *cm_id)  in srpt_drain_channel() argument
    2229  sdev = cm_id->context;  in srpt_drain_channel()
    [all …]
|
D | ib_srpt.h |
     304  struct ib_cm_id *cm_id;  member
     397  struct ib_cm_id *cm_id;  member
|
/drivers/infiniband/ulp/ipoib/ |
D | ipoib_cm.c |
      79  static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
     275  struct ib_cm_id *cm_id, struct ib_qp *qp,  in ipoib_cm_modify_rx_qp() argument
     283  ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);  in ipoib_cm_modify_rx_qp()
     294  ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);  in ipoib_cm_modify_rx_qp()
     315  ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);  in ipoib_cm_modify_rx_qp()
     348  static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,  in ipoib_cm_nonsrq_init_rx() argument
     378  ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);  in ipoib_cm_nonsrq_init_rx()
     421  static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,  in ipoib_cm_send_rep() argument
     439  return ib_send_cm_rep(cm_id, &rep);  in ipoib_cm_send_rep()
     442  static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)  in ipoib_cm_req_handler() argument
     [all …]
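The repeated ib_cm_init_qp_attr() calls in the ipoib_cm_modify_rx_qp() hits reflect the usual CM-driven QP bring-up: for each target state the CM fills in the attributes and mask it requires, and ib_modify_qp() applies them. A trimmed sketch of one such step (the real function walks INIT, RTR and then RTS, with error reporting at each stage); my_modify_cm_qp is an illustrative name.

#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>

static int my_modify_cm_qp(struct ib_cm_id *cm_id, struct ib_qp *qp,
			   enum ib_qp_state target_state)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask;
	int ret;

	qp_attr.qp_state = target_state;	/* IB_QPS_INIT, _RTR or _RTS */
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;			/* CM could not derive attrs */

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}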
|
/drivers/infiniband/hw/mlx4/ |
D | cm.c |
      75  static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)  in set_local_comm_id() argument
      80  msg->request_id = cpu_to_be32(cm_id);  in set_local_comm_id()
      86  msg->local_comm_id = cpu_to_be32(cm_id);  in set_local_comm_id()
     105  static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)  in set_remote_comm_id() argument
     110  msg->request_id = cpu_to_be32(cm_id);  in set_remote_comm_id()
     116  msg->remote_comm_id = cpu_to_be32(cm_id);  in set_remote_comm_id()
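In the mlx4 hits, cm_id is not a structure but a 32-bit communication ID that the SR-IOV CM proxy rewrites inside the CM MAD payload: the host-order value is stored big-endian, and which field it lands in (request_id for SIDR messages, local_comm_id/remote_comm_id otherwise) depends on the MAD attribute. A stand-in sketch of the byte-order handling only; the struct below is not the mlx4 wire layout.

#include <linux/types.h>
#include <asm/byteorder.h>

struct my_cm_msg {
	__be32 local_comm_id;	/* big-endian on the wire */
	__be32 remote_comm_id;
	/* ... remainder of the MAD data ... */
};

static void my_set_local_comm_id(struct my_cm_msg *msg, u32 cm_id)
{
	msg->local_comm_id = cpu_to_be32(cm_id);	/* host -> wire order */
}

static u32 my_get_local_comm_id(const struct my_cm_msg *msg)
{
	return be32_to_cpu(msg->local_comm_id);		/* wire -> host order */
}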
|
/drivers/infiniband/ulp/isert/ |
D | ib_isert.c |
      89  rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);  in isert_qp_event_callback()
     702  isert_conn->cm_id = cma_id;  in isert_connect_request()
     762  if (isert_conn->cm_id)  in isert_connect_release()
     763  rdma_destroy_id(isert_conn->cm_id);  in isert_connect_release()
     822  struct isert_np *isert_np = isert_conn->cm_id->context;  in isert_handle_unbound_conn()
     861  err = rdma_disconnect(isert_conn->cm_id);  in isert_conn_terminate()
     879  isert_np->cm_id = NULL;  in isert_np_cma_handler()
     882  isert_np->cm_id = isert_setup_id(isert_np);  in isert_np_cma_handler()
     883  if (IS_ERR(isert_np->cm_id)) {  in isert_np_cma_handler()
     885  isert_np, PTR_ERR(isert_np->cm_id));  in isert_np_cma_handler()
     [all …]
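A minimal sketch, in the spirit of the ib_isert.c hits, of how an RDMA-CM consumer tears its rdma_cm_id down: rdma_disconnect() starts the disconnect, and rdma_destroy_id() releases the id once the consumer knows no further CM events will be delivered (the real driver waits for the DISCONNECTED/TIMEWAIT_EXIT events before this point). my_conn is an illustrative structure, not the isert_conn layout.

#include <linux/printk.h>
#include <rdma/rdma_cm.h>

struct my_conn {
	struct rdma_cm_id *cm_id;
};

static void my_conn_release(struct my_conn *conn)
{
	if (!conn->cm_id)
		return;

	/* Ask the peer to disconnect; failure just means we never connected. */
	if (rdma_disconnect(conn->cm_id))
		pr_debug("connection was not established\n");

	/* Safe only once the CM can no longer call back into us. */
	rdma_destroy_id(conn->cm_id);
	conn->cm_id = NULL;
}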
|
D | ib_isert.h |
     165  struct rdma_cm_id *cm_id;  member
     221  struct rdma_cm_id *cm_id;  member
|
/drivers/char/ |
D | mbcs.h |
      79  union cm_id {  union
     145  uint64_t cm_id:2, // 1:0  member
     342  union cm_id id;
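Note that the mbcs.h hits are unrelated to the RDMA connection managers above: there cm_id is a 2-bit field of a 64-bit hardware register, accessible both as a raw quadword and as named bitfields through a union. A generic sketch of that idiom with illustrative field names (bitfield layout is compiler- and endian-dependent, which is why such drivers keep the raw view alongside):

#include <linux/types.h>

union my_control_reg {
	uint64_t reg;			/* raw register image for MMIO access */
	struct {
		uint64_t cm_id:2,	/* bits 1:0  - identifier field */
			 rsvd:62;	/* bits 63:2 - reserved */
	} bits;
};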
|
/drivers/infiniband/ulp/srp/ |
D | ib_srp.c |
     137  static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
     301  if (ch->cm_id)  in srp_new_cm_id()
     302  ib_destroy_cm_id(ch->cm_id);  in srp_new_cm_id()
     303  ch->cm_id = new_cm_id;  in srp_new_cm_id()
     609  if (ch->cm_id) {  in srp_free_ch_ib()
     610  ib_destroy_cm_id(ch->cm_id);  in srp_free_ch_ib()
     611  ch->cm_id = NULL;  in srp_free_ch_ib()
     802  status = ib_send_cm_req(ch->cm_id, &req->param);  in srp_send_req()
     836  if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {  in srp_disconnect_target()
    2254  static void srp_cm_rep_handler(struct ib_cm_id *cm_id,  in srp_cm_rep_handler()() argument
     [all …]
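The srp_new_cm_id() hits show the reconnect pattern for an ib_cm_id: allocate a fresh id with the same handler and context, destroy the stale one, then swap the pointer (srp_free_ch_ib() applies the same destroy-and-clear step during final teardown). A sketch with placeholder names (my_ch, my_cm_handler, my_new_cm_id):

#include <linux/err.h>
#include <rdma/ib_cm.h>

struct my_ch {
	struct ib_device *dev;
	struct ib_cm_id *cm_id;
};

static int my_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	return 0;	/* a real handler dispatches on event->event */
}

static int my_new_cm_id(struct my_ch *ch)
{
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(ch->dev, my_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);	/* drop the stale id first */
	ch->cm_id = new_cm_id;
	return 0;
}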
|