
Lines Matching +full:no +full:- +full:memory +full:- +full:wc

14  *      - Redistributions of source code must retain the above
18 * - Redistributions in binary form must reproduce the above
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
51 conn->c_version = version; in rds_ib_set_protocol()
59 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_set_flow_control()
63 ic->i_flowctl = 1; in rds_ib_set_flow_control()
66 ic->i_flowctl = 0; in rds_ib_set_flow_control()
72 * low timeout, but not the absolute minimum - this should
76 * smallest infinite number :-) above.
81 * Caller passes in a qp_attr pointer - don't waste stack space
89 attr->min_rnr_timer = IB_RNR_TIMER_000_32; in rds_ib_tune_rnr()
90 ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER); in rds_ib_tune_rnr()
92 printk(KERN_NOTICE "ib_modify_qp(IB_QP_MIN_RNR_TIMER): err=%d\n", -ret); in rds_ib_tune_rnr()
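The matched lines above (source lines 72-92) belong to the RNR-timer tuning path. Below is a hedged reconstruction of how they likely fit together, assuming the caller supplies the struct ib_qp_attr so the helper keeps it off its own stack; a sketch, not the verbatim kernel function.

    #include <rdma/ib_verbs.h>

    /* Sketch: apply a conservative minimum RNR NAK timer to a QP.
     * IB_RNR_TIMER_000_32 is roughly 0.32 ms - low, but not the absolute minimum. */
    static void tune_rnr_sketch(struct ib_qp *qp, struct ib_qp_attr *attr)
    {
            int ret;

            attr->min_rnr_timer = IB_RNR_TIMER_000_32;
            ret = ib_modify_qp(qp, attr, IB_QP_MIN_RNR_TIMER);
            if (ret)
                    printk(KERN_NOTICE "ib_modify_qp(IB_QP_MIN_RNR_TIMER): err=%d\n", -ret);
    }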
101 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_cm_connect_complete()
110 dp = event->param.conn.private_data; in rds_ib_cm_connect_complete()
111 if (conn->c_isv6) { in rds_ib_cm_connect_complete()
112 if (event->param.conn.private_data_len >= in rds_ib_cm_connect_complete()
114 major = dp->ricp_v6.dp_protocol_major; in rds_ib_cm_connect_complete()
115 minor = dp->ricp_v6.dp_protocol_minor; in rds_ib_cm_connect_complete()
116 credit = dp->ricp_v6.dp_credit; in rds_ib_cm_connect_complete()
118 * aligned. Since dp_ack_seq is 64-bit extended load in rds_ib_cm_connect_complete()
122 ack_seq = get_unaligned(&dp->ricp_v6.dp_ack_seq); in rds_ib_cm_connect_complete()
124 } else if (event->param.conn.private_data_len >= in rds_ib_cm_connect_complete()
126 major = dp->ricp_v4.dp_protocol_major; in rds_ib_cm_connect_complete()
127 minor = dp->ricp_v4.dp_protocol_minor; in rds_ib_cm_connect_complete()
128 credit = dp->ricp_v4.dp_credit; in rds_ib_cm_connect_complete()
129 ack_seq = get_unaligned(&dp->ricp_v4.dp_ack_seq); in rds_ib_cm_connect_complete()
138 if (conn->c_version < RDS_PROTOCOL_VERSION) { in rds_ib_cm_connect_complete()
139 if (conn->c_version != RDS_PROTOCOL_COMPAT_VERSION) { in rds_ib_cm_connect_complete()
140 pr_notice("RDS/IB: Connection <%pI6c,%pI6c> version %u.%u no longer supported\n", in rds_ib_cm_connect_complete()
141 &conn->c_laddr, &conn->c_faddr, in rds_ib_cm_connect_complete()
142 RDS_PROTOCOL_MAJOR(conn->c_version), in rds_ib_cm_connect_complete()
143 RDS_PROTOCOL_MINOR(conn->c_version)); in rds_ib_cm_connect_complete()
150 ic->i_active_side ? "Active" : "Passive", in rds_ib_cm_connect_complete()
151 &conn->c_laddr, &conn->c_faddr, conn->c_tos, in rds_ib_cm_connect_complete()
152 RDS_PROTOCOL_MAJOR(conn->c_version), in rds_ib_cm_connect_complete()
153 RDS_PROTOCOL_MINOR(conn->c_version), in rds_ib_cm_connect_complete()
154 ic->i_flowctl ? ", flow control" : ""); in rds_ib_cm_connect_complete()
157 ic->i_sl = ic->i_cm_id->route.path_rec->sl; in rds_ib_cm_connect_complete()
159 atomic_set(&ic->i_cq_quiesce, 0); in rds_ib_cm_connect_complete()
167 /* Post receive buffers - as a side effect, this will update in rds_ib_cm_connect_complete()
175 err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE); in rds_ib_cm_connect_complete()
180 err = rds_ib_update_ipaddr(ic->rds_ibdev, &conn->c_laddr); in rds_ib_cm_connect_complete()
193 conn->c_proposed_version = conn->c_version; in rds_ib_cm_connect_complete()
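The comment fragment at source line 118 explains why dp_ack_seq is read with get_unaligned(): inside the CM private data the 64-bit field need not be naturally aligned, and a plain 64-bit load would trap on strict-alignment CPUs. A minimal sketch of that access pattern follows; the struct is a hypothetical stand-in, not the real rds_ib_connect_private layout.

    #include <asm/unaligned.h>      /* <linux/unaligned.h> on newer kernels */
    #include <asm/byteorder.h>
    #include <linux/types.h>

    struct priv_sketch {            /* hypothetical layout for illustration */
            __be32 dp_credit;
            __be64 dp_ack_seq;      /* lands on a 4-byte boundary only */
    } __packed;

    static u64 read_ack_seq(const struct priv_sketch *dp)
    {
            /* get_unaligned() emits a byte-wise load where the CPU needs one */
            return be64_to_cpu(get_unaligned(&dp->dp_ack_seq));
    }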
205 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_cm_fill_conn_param()
206 struct rds_ib_device *rds_ibdev = ic->rds_ibdev; in rds_ib_cm_fill_conn_param()
210 conn_param->responder_resources = in rds_ib_cm_fill_conn_param()
211 min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources); in rds_ib_cm_fill_conn_param()
212 conn_param->initiator_depth = in rds_ib_cm_fill_conn_param()
213 min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth); in rds_ib_cm_fill_conn_param()
214 conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7); in rds_ib_cm_fill_conn_param()
215 conn_param->rnr_retry_count = 7; in rds_ib_cm_fill_conn_param()
220 dp->ricp_v6.dp_saddr = conn->c_laddr; in rds_ib_cm_fill_conn_param()
221 dp->ricp_v6.dp_daddr = conn->c_faddr; in rds_ib_cm_fill_conn_param()
222 dp->ricp_v6.dp_protocol_major = in rds_ib_cm_fill_conn_param()
224 dp->ricp_v6.dp_protocol_minor = in rds_ib_cm_fill_conn_param()
226 dp->ricp_v6.dp_protocol_minor_mask = in rds_ib_cm_fill_conn_param()
228 dp->ricp_v6.dp_ack_seq = in rds_ib_cm_fill_conn_param()
230 dp->ricp_v6.dp_cmn.ricpc_dp_toss = conn->c_tos; in rds_ib_cm_fill_conn_param()
232 conn_param->private_data = &dp->ricp_v6; in rds_ib_cm_fill_conn_param()
233 conn_param->private_data_len = sizeof(dp->ricp_v6); in rds_ib_cm_fill_conn_param()
235 dp->ricp_v4.dp_saddr = conn->c_laddr.s6_addr32[3]; in rds_ib_cm_fill_conn_param()
236 dp->ricp_v4.dp_daddr = conn->c_faddr.s6_addr32[3]; in rds_ib_cm_fill_conn_param()
237 dp->ricp_v4.dp_protocol_major = in rds_ib_cm_fill_conn_param()
239 dp->ricp_v4.dp_protocol_minor = in rds_ib_cm_fill_conn_param()
241 dp->ricp_v4.dp_protocol_minor_mask = in rds_ib_cm_fill_conn_param()
243 dp->ricp_v4.dp_ack_seq = in rds_ib_cm_fill_conn_param()
245 dp->ricp_v4.dp_cmn.ricpc_dp_toss = conn->c_tos; in rds_ib_cm_fill_conn_param()
247 conn_param->private_data = &dp->ricp_v4; in rds_ib_cm_fill_conn_param()
248 conn_param->private_data_len = sizeof(dp->ricp_v4); in rds_ib_cm_fill_conn_param()
252 if (ic->i_flowctl) { in rds_ib_cm_fill_conn_param()
256 (atomic_read(&ic->i_credits)); in rds_ib_cm_fill_conn_param()
258 dp->ricp_v6.dp_credit = cpu_to_be32(credits); in rds_ib_cm_fill_conn_param()
260 dp->ricp_v4.dp_credit = cpu_to_be32(credits); in rds_ib_cm_fill_conn_param()
262 &ic->i_credits); in rds_ib_cm_fill_conn_param()
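Source lines 252-262 are the flow-control branch of rds_ib_cm_fill_conn_param(): the receive credits already posted for the peer are read out of the connection's atomic credit counter, advertised big-endian in dp_credit, and then subtracted back out of the counter. The sketch below shows the advertisement step only; extract_post_credits() is a hypothetical helper standing in for the kernel's credit-unpacking macro, on the assumption that posted-receive credits occupy the upper half of the atomic.

    #include <linux/atomic.h>
    #include <asm/byteorder.h>

    /* Assumption: send credits in the low 16 bits, posted receives in the high 16. */
    static inline unsigned int extract_post_credits(atomic_t *credits)
    {
            return atomic_read(credits) >> 16;
    }

    static void advertise_credits(atomic_t *i_credits, __be32 *dp_credit)
    {
            unsigned int credits = extract_post_credits(i_credits);

            /* Tell the peer how many receive buffers are already posted for it. */
            *dp_credit = cpu_to_be32(credits);
    }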
270 event->event, ib_event_msg(event->event), data); in rds_ib_cq_event_handler()
284 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_cq_comp_handler_recv()
290 tasklet_schedule(&ic->i_recv_tasklet); in rds_ib_cq_comp_handler_recv()
297 struct ib_wc *wc; in poll_scq() local
301 wc = wcs + i; in poll_scq()
302 rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n", in poll_scq()
303 (unsigned long long)wc->wr_id, wc->status, in poll_scq()
304 wc->byte_len, be32_to_cpu(wc->ex.imm_data)); in poll_scq()
306 if (wc->wr_id <= ic->i_send_ring.w_nr || in poll_scq()
307 wc->wr_id == RDS_IB_ACK_WR_ID) in poll_scq()
308 rds_ib_send_cqe_handler(ic, wc); in poll_scq()
310 rds_ib_mr_cqe_handler(ic, wc); in poll_scq()
319 struct rds_connection *conn = ic->conn; in rds_ib_tasklet_fn_send()
324 if (atomic_read(&ic->i_cq_quiesce)) in rds_ib_tasklet_fn_send()
327 poll_scq(ic, ic->i_send_cq, ic->i_send_wc); in rds_ib_tasklet_fn_send()
328 ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP); in rds_ib_tasklet_fn_send()
329 poll_scq(ic, ic->i_send_cq, ic->i_send_wc); in rds_ib_tasklet_fn_send()
332 (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) || in rds_ib_tasklet_fn_send()
333 test_bit(0, &conn->c_map_queued))) in rds_ib_tasklet_fn_send()
334 rds_send_xmit(&ic->conn->c_path[0]); in rds_ib_tasklet_fn_send()
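Source lines 327-329 (and 373-375 on the receive side) show the usual verbs pattern for draining a completion queue from a tasklet: drain, re-arm notification with ib_req_notify_cq(), then drain once more so a completion that raced with the re-arm is not stranded until the next interrupt. A hedged sketch of the pattern; poll_all() stands in for the poll_scq()/poll_rcq() loops above, and the receive side re-arms with IB_CQ_SOLICITED rather than IB_CQ_NEXT_COMP.

    #include <rdma/ib_verbs.h>

    #define WC_BATCH 32     /* illustrative batch size */

    /* Drain every completion currently on the CQ, in batches. */
    static void poll_all(struct ib_cq *cq, void (*handle)(struct ib_wc *))
    {
            struct ib_wc wcs[WC_BATCH];
            int nr, i;

            while ((nr = ib_poll_cq(cq, WC_BATCH, wcs)) > 0)
                    for (i = 0; i < nr; i++)
                            handle(&wcs[i]);        /* send/recv/MR dispatch */
    }

    static void drain_and_rearm(struct ib_cq *cq, void (*handle)(struct ib_wc *))
    {
            poll_all(cq, handle);                   /* 1: consume what is queued   */
            ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);  /* 2: re-arm the notification  */
            poll_all(cq, handle);                   /* 3: close the race with (2)  */
    }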
342 struct ib_wc *wc; in poll_rcq() local
346 wc = wcs + i; in poll_rcq()
347 rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n", in poll_rcq()
348 (unsigned long long)wc->wr_id, wc->status, in poll_rcq()
349 wc->byte_len, be32_to_cpu(wc->ex.imm_data)); in poll_rcq()
351 rds_ib_recv_cqe_handler(ic, wc, ack_state); in poll_rcq()
359 struct rds_connection *conn = ic->conn; in rds_ib_tasklet_fn_recv()
360 struct rds_ib_device *rds_ibdev = ic->rds_ibdev; in rds_ib_tasklet_fn_recv()
369 if (atomic_read(&ic->i_cq_quiesce)) in rds_ib_tasklet_fn_recv()
373 poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state); in rds_ib_tasklet_fn_recv()
374 ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED); in rds_ib_tasklet_fn_recv()
375 poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state); in rds_ib_tasklet_fn_recv()
379 if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) { in rds_ib_tasklet_fn_recv()
381 ic->i_ack_recv = state.ack_recv; in rds_ib_tasklet_fn_recv()
391 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_qp_event_handler()
393 rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event, in rds_ib_qp_event_handler()
394 ib_event_msg(event->event)); in rds_ib_qp_event_handler()
396 switch (event->event) { in rds_ib_qp_event_handler()
398 rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST); in rds_ib_qp_event_handler()
401 rdsdebug("Fatal QP Event %u (%s) - connection %pI6c->%pI6c, reconnecting\n", in rds_ib_qp_event_handler()
402 event->event, ib_event_msg(event->event), in rds_ib_qp_event_handler()
403 &conn->c_laddr, &conn->c_faddr); in rds_ib_qp_event_handler()
412 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_cq_comp_handler_send()
418 tasklet_schedule(&ic->i_send_tasklet); in rds_ib_cq_comp_handler_send()
423 int min = rds_ibdev->vector_load[rds_ibdev->dev->num_comp_vectors - 1]; in ibdev_get_unused_vector()
424 int index = rds_ibdev->dev->num_comp_vectors - 1; in ibdev_get_unused_vector()
427 for (i = rds_ibdev->dev->num_comp_vectors - 1; i >= 0; i--) { in ibdev_get_unused_vector()
428 if (rds_ibdev->vector_load[i] < min) { in ibdev_get_unused_vector()
430 min = rds_ibdev->vector_load[i]; in ibdev_get_unused_vector()
434 rds_ibdev->vector_load[index]++; in ibdev_get_unused_vector()
440 rds_ibdev->vector_load[index]--; in ibdev_put_vector()
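Source lines 423-440 show how completion vectors are handed out: the device keeps a per-vector load count, each new CQ is assigned to the least-loaded vector so interrupt work is spread across vectors, and the count is dropped again when the CQ goes away. A self-contained sketch of that selection, reconstructed from the matched lines; vector_load[] and num_vectors mirror the fields referenced above.

    /* Pick the index of the least-loaded completion vector and charge it. */
    static int get_unused_vector(int *vector_load, int num_vectors)
    {
            int index = num_vectors - 1;
            int lowest = vector_load[index];
            int i;

            for (i = num_vectors - 1; i >= 0; i--) {
                    if (vector_load[i] < lowest) {
                            lowest = vector_load[i];
                            index = i;
                    }
            }
            vector_load[index]++;           /* charge the chosen vector */
            return index;
    }

    static void put_vector(int *vector_load, int index)
    {
            vector_load[index]--;           /* release it when the CQ is destroyed */
    }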
469 /* Free the DMA memory used to store struct rds_header.
472 * @hdrs: pointer to the array storing DMA memory pointers
483 rds_dma_hdr_free(dev->dev, hdrs[i], dma_addrs[i], dir); in rds_dma_hdrs_free()
489 /* Allocate DMA coherent memory to be used to store struct rds_header for
490 * sending/receiving packets. The pointers to the DMA memory and the
497 * It returns the pointer to the array storing the DMA memory pointers. On
509 ibdev_to_node(dev->dev)); in rds_dma_hdrs_alloc()
514 ibdev_to_node(dev->dev)); in rds_dma_hdrs_alloc()
521 hdrs[i] = rds_dma_hdr_alloc(dev->dev, &hdr_daddrs[i], dir); in rds_dma_hdrs_alloc()
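The fragments around source lines 469-521 describe rds_dma_hdrs_alloc()/rds_dma_hdrs_free(): one DMA-coherent struct rds_header buffer per ring entry, with two parallel arrays tracking the kernel pointers and the DMA addresses. The sketch below shows that shape, calling dma_alloc_coherent()/dma_free_coherent() directly in place of the rds_dma_hdr_alloc()/rds_dma_hdr_free() wrappers, and omitting the NUMA-node and DMA-direction handling the real code does.

    #include <linux/dma-mapping.h>
    #include <linux/slab.h>

    struct hdr { char data[48]; };          /* stand-in for struct rds_header */

    /* Allocate 'n' coherent headers; fill hdrs[] and daddrs[] in parallel. */
    static struct hdr **hdrs_alloc(struct device *dev, dma_addr_t *daddrs, u32 n)
    {
            struct hdr **hdrs;
            u32 i;

            hdrs = kcalloc(n, sizeof(*hdrs), GFP_KERNEL);
            if (!hdrs)
                    return NULL;

            for (i = 0; i < n; i++) {
                    hdrs[i] = dma_alloc_coherent(dev, sizeof(**hdrs), &daddrs[i],
                                                 GFP_KERNEL);
                    if (!hdrs[i])
                            goto err;       /* unwind what was already allocated */
            }
            return hdrs;

    err:
            while (i--)
                    dma_free_coherent(dev, sizeof(**hdrs), hdrs[i], daddrs[i]);
            kfree(hdrs);
            return NULL;
    }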
538 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_setup_qp()
539 struct ib_device *dev = ic->i_cm_id->device; in rds_ib_setup_qp()
552 return -EOPNOTSUPP; in rds_ib_setup_qp()
563 max_wrs = rds_ibdev->max_wrs < rds_ib_sysctl_max_send_wr + 1 ? in rds_ib_setup_qp()
564 rds_ibdev->max_wrs - 1 : rds_ib_sysctl_max_send_wr; in rds_ib_setup_qp()
565 if (ic->i_send_ring.w_nr != max_wrs) in rds_ib_setup_qp()
566 rds_ib_ring_resize(&ic->i_send_ring, max_wrs); in rds_ib_setup_qp()
568 max_wrs = rds_ibdev->max_wrs < rds_ib_sysctl_max_recv_wr + 1 ? in rds_ib_setup_qp()
569 rds_ibdev->max_wrs - 1 : rds_ib_sysctl_max_recv_wr; in rds_ib_setup_qp()
570 if (ic->i_recv_ring.w_nr != max_wrs) in rds_ib_setup_qp()
571 rds_ib_ring_resize(&ic->i_recv_ring, max_wrs); in rds_ib_setup_qp()
573 /* Protection domain and memory range */ in rds_ib_setup_qp()
574 ic->i_pd = rds_ibdev->pd; in rds_ib_setup_qp()
576 ic->i_scq_vector = ibdev_get_unused_vector(rds_ibdev); in rds_ib_setup_qp()
577 cq_attr.cqe = ic->i_send_ring.w_nr + fr_queue_space + 1; in rds_ib_setup_qp()
578 cq_attr.comp_vector = ic->i_scq_vector; in rds_ib_setup_qp()
579 ic->i_send_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_send, in rds_ib_setup_qp()
582 if (IS_ERR(ic->i_send_cq)) { in rds_ib_setup_qp()
583 ret = PTR_ERR(ic->i_send_cq); in rds_ib_setup_qp()
584 ic->i_send_cq = NULL; in rds_ib_setup_qp()
585 ibdev_put_vector(rds_ibdev, ic->i_scq_vector); in rds_ib_setup_qp()
590 ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev); in rds_ib_setup_qp()
591 cq_attr.cqe = ic->i_recv_ring.w_nr; in rds_ib_setup_qp()
592 cq_attr.comp_vector = ic->i_rcq_vector; in rds_ib_setup_qp()
593 ic->i_recv_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv, in rds_ib_setup_qp()
596 if (IS_ERR(ic->i_recv_cq)) { in rds_ib_setup_qp()
597 ret = PTR_ERR(ic->i_recv_cq); in rds_ib_setup_qp()
598 ic->i_recv_cq = NULL; in rds_ib_setup_qp()
599 ibdev_put_vector(rds_ibdev, ic->i_rcq_vector); in rds_ib_setup_qp()
604 ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP); in rds_ib_setup_qp()
610 ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED); in rds_ib_setup_qp()
621 attr.cap.max_send_wr = ic->i_send_ring.w_nr + fr_queue_space + 1; in rds_ib_setup_qp()
622 attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1; in rds_ib_setup_qp()
623 attr.cap.max_send_sge = rds_ibdev->max_sge; in rds_ib_setup_qp()
627 attr.send_cq = ic->i_send_cq; in rds_ib_setup_qp()
628 attr.recv_cq = ic->i_recv_cq; in rds_ib_setup_qp()
634 ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr); in rds_ib_setup_qp()
640 ic->i_send_hdrs = rds_dma_hdrs_alloc(rds_ibdev, &ic->i_send_hdrs_dma, in rds_ib_setup_qp()
641 ic->i_send_ring.w_nr, in rds_ib_setup_qp()
643 if (!ic->i_send_hdrs) { in rds_ib_setup_qp()
644 ret = -ENOMEM; in rds_ib_setup_qp()
649 ic->i_recv_hdrs = rds_dma_hdrs_alloc(rds_ibdev, &ic->i_recv_hdrs_dma, in rds_ib_setup_qp()
650 ic->i_recv_ring.w_nr, in rds_ib_setup_qp()
652 if (!ic->i_recv_hdrs) { in rds_ib_setup_qp()
653 ret = -ENOMEM; in rds_ib_setup_qp()
658 ic->i_ack = rds_dma_hdr_alloc(rds_ibdev->dev, &ic->i_ack_dma, in rds_ib_setup_qp()
660 if (!ic->i_ack) { in rds_ib_setup_qp()
661 ret = -ENOMEM; in rds_ib_setup_qp()
666 ic->i_sends = vzalloc_node(array_size(sizeof(struct rds_ib_send_work), in rds_ib_setup_qp()
667 ic->i_send_ring.w_nr), in rds_ib_setup_qp()
669 if (!ic->i_sends) { in rds_ib_setup_qp()
670 ret = -ENOMEM; in rds_ib_setup_qp()
675 ic->i_recvs = vzalloc_node(array_size(sizeof(struct rds_ib_recv_work), in rds_ib_setup_qp()
676 ic->i_recv_ring.w_nr), in rds_ib_setup_qp()
678 if (!ic->i_recvs) { in rds_ib_setup_qp()
679 ret = -ENOMEM; in rds_ib_setup_qp()
686 rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd, in rds_ib_setup_qp()
687 ic->i_send_cq, ic->i_recv_cq); in rds_ib_setup_qp()
692 vfree(ic->i_sends); in rds_ib_setup_qp()
695 rds_dma_hdr_free(rds_ibdev->dev, ic->i_ack, ic->i_ack_dma, in rds_ib_setup_qp()
697 ic->i_ack = NULL; in rds_ib_setup_qp()
700 rds_dma_hdrs_free(rds_ibdev, ic->i_recv_hdrs, ic->i_recv_hdrs_dma, in rds_ib_setup_qp()
701 ic->i_recv_ring.w_nr, DMA_FROM_DEVICE); in rds_ib_setup_qp()
702 ic->i_recv_hdrs = NULL; in rds_ib_setup_qp()
703 ic->i_recv_hdrs_dma = NULL; in rds_ib_setup_qp()
706 rds_dma_hdrs_free(rds_ibdev, ic->i_send_hdrs, ic->i_send_hdrs_dma, in rds_ib_setup_qp()
707 ic->i_send_ring.w_nr, DMA_TO_DEVICE); in rds_ib_setup_qp()
708 ic->i_send_hdrs = NULL; in rds_ib_setup_qp()
709 ic->i_send_hdrs_dma = NULL; in rds_ib_setup_qp()
712 rdma_destroy_qp(ic->i_cm_id); in rds_ib_setup_qp()
714 ib_destroy_cq(ic->i_recv_cq); in rds_ib_setup_qp()
715 ic->i_recv_cq = NULL; in rds_ib_setup_qp()
717 ib_destroy_cq(ic->i_send_cq); in rds_ib_setup_qp()
718 ic->i_send_cq = NULL; in rds_ib_setup_qp()
729 const union rds_ib_conn_priv *dp = event->param.conn.private_data; in rds_ib_protocol_compatible()
736 * rdma_cm private data is odd - when there is any private data in the in rds_ib_protocol_compatible()
741 * from an older version. This could be 3.0 or 2.0 - we can't tell. in rds_ib_protocol_compatible()
742 * We really should have changed this for OFED 1.3 :-( in rds_ib_protocol_compatible()
746 if (!event->param.conn.private_data_len) { in rds_ib_protocol_compatible()
747 printk(KERN_NOTICE "RDS incoming connection has no private data, " in rds_ib_protocol_compatible()
754 major = dp->ricp_v6.dp_protocol_major; in rds_ib_protocol_compatible()
755 minor = dp->ricp_v6.dp_protocol_minor; in rds_ib_protocol_compatible()
756 mask = dp->ricp_v6.dp_protocol_minor_mask; in rds_ib_protocol_compatible()
759 major = dp->ricp_v4.dp_protocol_major; in rds_ib_protocol_compatible()
760 minor = dp->ricp_v4.dp_protocol_minor; in rds_ib_protocol_compatible()
761 mask = dp->ricp_v4.dp_protocol_minor_mask; in rds_ib_protocol_compatible()
764 /* Even if len is crap *now* I still want to check it. -ASG */ in rds_ib_protocol_compatible()
765 if (event->param.conn.private_data_len < data_len || major == 0) in rds_ib_protocol_compatible()
779 &dp->ricp_v6.dp_saddr, major, minor); in rds_ib_protocol_compatible()
782 &dp->ricp_v4.dp_saddr, major, minor); in rds_ib_protocol_compatible()
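rds_ib_protocol_compatible() (source lines 729-782) pulls a major version, a minor version, and a minor-version bitmask out of the incoming private data; the bitmask is what lets the two ends settle on the highest minor version they both support. The sketch below captures that negotiation idea only; it is the general scheme, not the verbatim kernel logic.

    #include <linux/types.h>

    /* Bit N set in a mask means "I can speak minor version N". */
    static unsigned int pick_common_minor(u16 my_mask, u16 peer_mask)
    {
            u16 common = my_mask & peer_mask;
            unsigned int best = 0;

            while (common >>= 1)    /* index of the highest common set bit */
                    best++;
            return best;            /* caller rejects the connection when common was 0 */
    }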
806 idx = dev->ifindex; in __rds_find_ifindex()
819 __be64 lguid = cm_id->route.path_rec->sgid.global.interface_id; in rds_ib_cm_handle_connect()
820 __be64 fguid = cm_id->route.path_rec->dgid.global.interface_id; in rds_ib_cm_handle_connect()
843 dp = event->param.conn.private_data; in rds_ib_cm_handle_connect()
846 dp_cmn = &dp->ricp_v6.dp_cmn; in rds_ib_cm_handle_connect()
847 saddr6 = &dp->ricp_v6.dp_saddr; in rds_ib_cm_handle_connect()
848 daddr6 = &dp->ricp_v6.dp_daddr; in rds_ib_cm_handle_connect()
856 /* No index found... Need to bail out. */ in rds_ib_cm_handle_connect()
858 err = -EOPNOTSUPP; in rds_ib_cm_handle_connect()
864 /* No index found... Need to bail out. */ in rds_ib_cm_handle_connect()
866 err = -EOPNOTSUPP; in rds_ib_cm_handle_connect()
871 err = -EOPNOTSUPP; in rds_ib_cm_handle_connect()
875 dp_cmn = &dp->ricp_v4.dp_cmn; in rds_ib_cm_handle_connect()
876 ipv6_addr_set_v4mapped(dp->ricp_v4.dp_saddr, &s_mapped_addr); in rds_ib_cm_handle_connect()
877 ipv6_addr_set_v4mapped(dp->ricp_v4.dp_daddr, &d_mapped_addr); in rds_ib_cm_handle_connect()
886 (unsigned long long)be64_to_cpu(fguid), dp_cmn->ricpc_dp_toss); in rds_ib_cm_handle_connect()
890 &rds_ib_transport, dp_cmn->ricpc_dp_toss, in rds_ib_cm_handle_connect()
902 * by both hosts, we have a random backoff mechanism - in rds_ib_cm_handle_connect()
905 mutex_lock(&conn->c_cm_lock); in rds_ib_cm_handle_connect()
913 /* Wait and see - our connect may still be succeeding */ in rds_ib_cm_handle_connect()
919 ic = conn->c_transport_data; in rds_ib_cm_handle_connect()
922 rds_ib_set_flow_control(conn, be32_to_cpu(dp_cmn->ricpc_credit)); in rds_ib_cm_handle_connect()
926 if (dp_cmn->ricpc_ack_seq) in rds_ib_cm_handle_connect()
927 rds_send_drop_acked(conn, be64_to_cpu(dp_cmn->ricpc_ack_seq), in rds_ib_cm_handle_connect()
930 BUG_ON(cm_id->context); in rds_ib_cm_handle_connect()
931 BUG_ON(ic->i_cm_id); in rds_ib_cm_handle_connect()
933 ic->i_cm_id = cm_id; in rds_ib_cm_handle_connect()
934 cm_id->context = conn; in rds_ib_cm_handle_connect()
947 event->param.conn.responder_resources, in rds_ib_cm_handle_connect()
948 event->param.conn.initiator_depth, isv6); in rds_ib_cm_handle_connect()
956 mutex_unlock(&conn->c_cm_lock); in rds_ib_cm_handle_connect()
966 struct rds_connection *conn = cm_id->context; in rds_ib_cm_initiate_connect()
967 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_cm_initiate_connect()
975 ic->i_flowctl = rds_ib_sysctl_flow_control; /* advertise flow control */ in rds_ib_cm_initiate_connect()
984 conn->c_proposed_version, in rds_ib_cm_initiate_connect()
992 /* Beware - returning non-zero tells the rdma_cm to destroy in rds_ib_cm_initiate_connect()
996 if (ic->i_cm_id == cm_id) in rds_ib_cm_initiate_connect()
999 ic->i_active_side = true; in rds_ib_cm_initiate_connect()
1005 struct rds_connection *conn = cp->cp_conn; in rds_ib_conn_path_connect()
1011 ic = conn->c_transport_data; in rds_ib_conn_path_connect()
1016 if (conn->c_isv6) in rds_ib_conn_path_connect()
1021 ic->i_cm_id = rdma_create_id(&init_net, handler, conn, in rds_ib_conn_path_connect()
1023 if (IS_ERR(ic->i_cm_id)) { in rds_ib_conn_path_connect()
1024 ret = PTR_ERR(ic->i_cm_id); in rds_ib_conn_path_connect()
1025 ic->i_cm_id = NULL; in rds_ib_conn_path_connect()
1030 rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn); in rds_ib_conn_path_connect()
1032 if (ipv6_addr_v4mapped(&conn->c_faddr)) { in rds_ib_conn_path_connect()
1036 sin->sin_family = AF_INET; in rds_ib_conn_path_connect()
1037 sin->sin_addr.s_addr = conn->c_laddr.s6_addr32[3]; in rds_ib_conn_path_connect()
1038 sin->sin_port = 0; in rds_ib_conn_path_connect()
1041 sin->sin_family = AF_INET; in rds_ib_conn_path_connect()
1042 sin->sin_addr.s_addr = conn->c_faddr.s6_addr32[3]; in rds_ib_conn_path_connect()
1043 sin->sin_port = htons(RDS_PORT); in rds_ib_conn_path_connect()
1048 sin6->sin6_family = AF_INET6; in rds_ib_conn_path_connect()
1049 sin6->sin6_addr = conn->c_laddr; in rds_ib_conn_path_connect()
1050 sin6->sin6_port = 0; in rds_ib_conn_path_connect()
1051 sin6->sin6_scope_id = conn->c_dev_if; in rds_ib_conn_path_connect()
1054 sin6->sin6_family = AF_INET6; in rds_ib_conn_path_connect()
1055 sin6->sin6_addr = conn->c_faddr; in rds_ib_conn_path_connect()
1056 sin6->sin6_port = htons(RDS_CM_PORT); in rds_ib_conn_path_connect()
1057 sin6->sin6_scope_id = conn->c_dev_if; in rds_ib_conn_path_connect()
1060 ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src, in rds_ib_conn_path_connect()
1064 rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id, in rds_ib_conn_path_connect()
1066 rdma_destroy_id(ic->i_cm_id); in rds_ib_conn_path_connect()
1067 ic->i_cm_id = NULL; in rds_ib_conn_path_connect()
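rds_ib_conn_path_connect() (source lines 1005-1067) creates an RDMA CM id and starts address resolution, filling a sockaddr_in when the peer address is IPv4-mapped and a sockaddr_in6 otherwise, carrying the scope id for IPv6. A hedged sketch of the IPv4 leg; the 5000 ms timeout is illustrative, the destination port is passed in rather than hard-coded, and the CM event handler is assumed to have been registered when the id was created.

    #include <rdma/rdma_cm.h>
    #include <linux/in.h>

    /* Sketch: kick off async address/route resolution on a fresh RDMA CM id. */
    static int start_connect_v4(struct rdma_cm_id *cm_id, __be32 laddr, __be32 faddr,
                                __be16 dport)
    {
            struct sockaddr_in src = {
                    .sin_family = AF_INET,
                    .sin_addr.s_addr = laddr,
                    .sin_port = 0,                  /* any local port */
            };
            struct sockaddr_in dst = {
                    .sin_family = AF_INET,
                    .sin_addr.s_addr = faddr,
                    .sin_port = dport,
            };

            /* Completion arrives later as RDMA_CM_EVENT_ADDR_RESOLVED (or an error)
             * in the cm event handler registered with rdma_create_id(). */
            return rdma_resolve_addr(cm_id, (struct sockaddr *)&src,
                                     (struct sockaddr *)&dst, 5000 /* ms */);
    }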
1081 struct rds_connection *conn = cp->cp_conn; in rds_ib_conn_path_shutdown()
1082 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_conn_path_shutdown()
1085 rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id, in rds_ib_conn_path_shutdown()
1086 ic->i_pd, ic->i_send_cq, ic->i_recv_cq, in rds_ib_conn_path_shutdown()
1087 ic->i_cm_id ? ic->i_cm_id->qp : NULL); in rds_ib_conn_path_shutdown()
1089 if (ic->i_cm_id) { in rds_ib_conn_path_shutdown()
1090 rdsdebug("disconnecting cm %p\n", ic->i_cm_id); in rds_ib_conn_path_shutdown()
1091 err = rdma_disconnect(ic->i_cm_id); in rds_ib_conn_path_shutdown()
1097 ic->i_cm_id, err); in rds_ib_conn_path_shutdown()
1111 * sends to complete we're ensured that there will be no in rds_ib_conn_path_shutdown()
1115 rds_ib_ring_empty(&ic->i_recv_ring) && in rds_ib_conn_path_shutdown()
1116 (atomic_read(&ic->i_signaled_sends) == 0) && in rds_ib_conn_path_shutdown()
1117 (atomic_read(&ic->i_fastreg_inuse_count) == 0) && in rds_ib_conn_path_shutdown()
1118 (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR)); in rds_ib_conn_path_shutdown()
1119 tasklet_kill(&ic->i_send_tasklet); in rds_ib_conn_path_shutdown()
1120 tasklet_kill(&ic->i_recv_tasklet); in rds_ib_conn_path_shutdown()
1122 atomic_set(&ic->i_cq_quiesce, 1); in rds_ib_conn_path_shutdown()
1125 if (ic->i_cm_id->qp) in rds_ib_conn_path_shutdown()
1126 rdma_destroy_qp(ic->i_cm_id); in rds_ib_conn_path_shutdown()
1127 if (ic->i_send_cq) { in rds_ib_conn_path_shutdown()
1128 if (ic->rds_ibdev) in rds_ib_conn_path_shutdown()
1129 ibdev_put_vector(ic->rds_ibdev, ic->i_scq_vector); in rds_ib_conn_path_shutdown()
1130 ib_destroy_cq(ic->i_send_cq); in rds_ib_conn_path_shutdown()
1133 if (ic->i_recv_cq) { in rds_ib_conn_path_shutdown()
1134 if (ic->rds_ibdev) in rds_ib_conn_path_shutdown()
1135 ibdev_put_vector(ic->rds_ibdev, ic->i_rcq_vector); in rds_ib_conn_path_shutdown()
1136 ib_destroy_cq(ic->i_recv_cq); in rds_ib_conn_path_shutdown()
1139 if (ic->rds_ibdev) { in rds_ib_conn_path_shutdown()
1141 if (ic->i_send_hdrs) { in rds_ib_conn_path_shutdown()
1142 rds_dma_hdrs_free(ic->rds_ibdev, in rds_ib_conn_path_shutdown()
1143 ic->i_send_hdrs, in rds_ib_conn_path_shutdown()
1144 ic->i_send_hdrs_dma, in rds_ib_conn_path_shutdown()
1145 ic->i_send_ring.w_nr, in rds_ib_conn_path_shutdown()
1147 ic->i_send_hdrs = NULL; in rds_ib_conn_path_shutdown()
1148 ic->i_send_hdrs_dma = NULL; in rds_ib_conn_path_shutdown()
1151 if (ic->i_recv_hdrs) { in rds_ib_conn_path_shutdown()
1152 rds_dma_hdrs_free(ic->rds_ibdev, in rds_ib_conn_path_shutdown()
1153 ic->i_recv_hdrs, in rds_ib_conn_path_shutdown()
1154 ic->i_recv_hdrs_dma, in rds_ib_conn_path_shutdown()
1155 ic->i_recv_ring.w_nr, in rds_ib_conn_path_shutdown()
1157 ic->i_recv_hdrs = NULL; in rds_ib_conn_path_shutdown()
1158 ic->i_recv_hdrs_dma = NULL; in rds_ib_conn_path_shutdown()
1161 if (ic->i_ack) { in rds_ib_conn_path_shutdown()
1162 rds_dma_hdr_free(ic->rds_ibdev->dev, ic->i_ack, in rds_ib_conn_path_shutdown()
1163 ic->i_ack_dma, DMA_TO_DEVICE); in rds_ib_conn_path_shutdown()
1164 ic->i_ack = NULL; in rds_ib_conn_path_shutdown()
1167 WARN_ON(ic->i_send_hdrs); in rds_ib_conn_path_shutdown()
1168 WARN_ON(ic->i_send_hdrs_dma); in rds_ib_conn_path_shutdown()
1169 WARN_ON(ic->i_recv_hdrs); in rds_ib_conn_path_shutdown()
1170 WARN_ON(ic->i_recv_hdrs_dma); in rds_ib_conn_path_shutdown()
1171 WARN_ON(ic->i_ack); in rds_ib_conn_path_shutdown()
1174 if (ic->i_sends) in rds_ib_conn_path_shutdown()
1176 if (ic->i_recvs) in rds_ib_conn_path_shutdown()
1179 rdma_destroy_id(ic->i_cm_id); in rds_ib_conn_path_shutdown()
1184 if (ic->rds_ibdev) in rds_ib_conn_path_shutdown()
1185 rds_ib_remove_conn(ic->rds_ibdev, conn); in rds_ib_conn_path_shutdown()
1187 ic->i_cm_id = NULL; in rds_ib_conn_path_shutdown()
1188 ic->i_pd = NULL; in rds_ib_conn_path_shutdown()
1189 ic->i_send_cq = NULL; in rds_ib_conn_path_shutdown()
1190 ic->i_recv_cq = NULL; in rds_ib_conn_path_shutdown()
1192 BUG_ON(ic->rds_ibdev); in rds_ib_conn_path_shutdown()
1195 if (ic->i_data_op) { in rds_ib_conn_path_shutdown()
1198 rm = container_of(ic->i_data_op, struct rds_message, data); in rds_ib_conn_path_shutdown()
1200 ic->i_data_op = NULL; in rds_ib_conn_path_shutdown()
1204 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); in rds_ib_conn_path_shutdown()
1206 atomic64_set(&ic->i_ack_next, 0); in rds_ib_conn_path_shutdown()
1208 ic->i_ack_next = 0; in rds_ib_conn_path_shutdown()
1210 ic->i_ack_recv = 0; in rds_ib_conn_path_shutdown()
1213 ic->i_flowctl = 0; in rds_ib_conn_path_shutdown()
1214 atomic_set(&ic->i_credits, 0); in rds_ib_conn_path_shutdown()
1216 /* Re-init rings, but retain sizes. */ in rds_ib_conn_path_shutdown()
1217 rds_ib_ring_init(&ic->i_send_ring, ic->i_send_ring.w_nr); in rds_ib_conn_path_shutdown()
1218 rds_ib_ring_init(&ic->i_recv_ring, ic->i_recv_ring.w_nr); in rds_ib_conn_path_shutdown()
1220 if (ic->i_ibinc) { in rds_ib_conn_path_shutdown()
1221 rds_inc_put(&ic->i_ibinc->ii_inc); in rds_ib_conn_path_shutdown()
1222 ic->i_ibinc = NULL; in rds_ib_conn_path_shutdown()
1225 vfree(ic->i_sends); in rds_ib_conn_path_shutdown()
1226 ic->i_sends = NULL; in rds_ib_conn_path_shutdown()
1227 vfree(ic->i_recvs); in rds_ib_conn_path_shutdown()
1228 ic->i_recvs = NULL; in rds_ib_conn_path_shutdown()
1229 ic->i_active_side = false; in rds_ib_conn_path_shutdown()
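Source lines 1111-1120 show the quiesce step of the shutdown path: after rdma_disconnect() the code blocks on a wait queue until both work rings are empty and every signalled send and fast-registration work request has completed, and only then kills the tasklets and tears down the QP and CQs. A minimal sketch of that wait_event() idiom; the wait queue and the quiesced() predicate are placeholders for the connection-specific condition shown above.

    #include <linux/wait.h>
    #include <linux/types.h>

    static DECLARE_WAIT_QUEUE_HEAD(shutdown_wait);

    static bool quiesced(void)
    {
            /* placeholder for: both rings empty, no signalled sends or
             * fast-registration WRs still outstanding */
            return true;
    }

    static void wait_for_quiesce(void)
    {
            /* Sleeps until the condition holds; the completion handlers are
             * expected to call wake_up(&shutdown_wait) as work drains. */
            wait_event(shutdown_wait, quiesced());
    }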
1241 return -ENOMEM; in rds_ib_conn_alloc()
1249 INIT_LIST_HEAD(&ic->ib_node); in rds_ib_conn_alloc()
1250 tasklet_init(&ic->i_send_tasklet, rds_ib_tasklet_fn_send, in rds_ib_conn_alloc()
1252 tasklet_init(&ic->i_recv_tasklet, rds_ib_tasklet_fn_recv, in rds_ib_conn_alloc()
1254 mutex_init(&ic->i_recv_mutex); in rds_ib_conn_alloc()
1256 spin_lock_init(&ic->i_ack_lock); in rds_ib_conn_alloc()
1258 atomic_set(&ic->i_signaled_sends, 0); in rds_ib_conn_alloc()
1259 atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR); in rds_ib_conn_alloc()
1265 rds_ib_ring_init(&ic->i_send_ring, 0); in rds_ib_conn_alloc()
1266 rds_ib_ring_init(&ic->i_recv_ring, 0); in rds_ib_conn_alloc()
1268 ic->conn = conn; in rds_ib_conn_alloc()
1269 conn->c_transport_data = ic; in rds_ib_conn_alloc()
1272 list_add_tail(&ic->ib_node, &ib_nodev_conns); in rds_ib_conn_alloc()
1276 rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data); in rds_ib_conn_alloc()
1295 lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock; in rds_ib_conn_free()
1298 list_del(&ic->ib_node); in rds_ib_conn_free()