Lines Matching +full:retain +full:- +full:state +full:- +full:shutdown (the words "retain", "state" and "shutdown") in net/rds/connection.c
14 * - Redistributions of source code must retain the above
18 * - Redistributions in binary form must reproduce the above
46 #define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)
65 lhash = (__force u32)laddr->s6_addr32[3]; in rds_conn_bucket()
69 fhash = (__force u32)faddr->s6_addr32[3]; in rds_conn_bucket()
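
The lines above belong to the connection hash: RDS_CONNECTION_HASH_ENTRIES is a power of two, so the mask on line 46 turns a 32-bit hash of the two addresses (lines 65 and 69 take the low word, i.e. the IPv4 part of a v4-mapped address) into a bucket index. A simplified, hedged reconstruction of rds_conn_bucket() under that reading follows; the real helper also mixes in random hash secrets and jhashes full IPv6 addresses, and rds_conn_hash[] is the assumed bucket array.

/* Simplified sketch, not the exact kernel code: fold the low 32 bits of the
 * local and foreign address and mask down to one of the
 * RDS_CONNECTION_HASH_ENTRIES buckets (a power of two, so the AND acts as a
 * cheap modulo). */
static struct hlist_head *rds_conn_bucket(const struct in6_addr *laddr,
					  const struct in6_addr *faddr)
{
	u32 lhash = (__force u32)laddr->s6_addr32[3];
	u32 fhash = (__force u32)faddr->s6_addr32[3];

	return &rds_conn_hash[(lhash ^ fhash) & RDS_CONNECTION_HASH_MASK];
}
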
92 if (ipv6_addr_equal(&conn->c_faddr, faddr) && in rds_conn_lookup()
93 ipv6_addr_equal(&conn->c_laddr, laddr) && in rds_conn_lookup()
94 conn->c_trans == trans && in rds_conn_lookup()
95 conn->c_tos == tos && in rds_conn_lookup()
97 conn->c_dev_if == dev_if) { in rds_conn_lookup()
102 rdsdebug("returning conn %p for %pI6c -> %pI6c\n", ret, in rds_conn_lookup()
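
Lines 92-97 are the match test inside rds_conn_lookup(): a cached connection is only reused when foreign address, local address, transport, TOS, network namespace and bound interface all agree. A hedged reconstruction of the enclosing bucket walk, where the RCU list iteration and the rds_conn_net() namespace check are assumptions filled in around the matched lines:

/* Walk the hash bucket under RCU and return the first connection whose
 * identity matches; ret stays NULL when nothing matches. */
hlist_for_each_entry_rcu(conn, head, c_hash_node) {
	if (ipv6_addr_equal(&conn->c_faddr, faddr) &&
	    ipv6_addr_equal(&conn->c_laddr, laddr) &&
	    conn->c_trans == trans &&
	    conn->c_tos == tos &&
	    net == rds_conn_net(conn) &&	/* assumed netns check */
	    conn->c_dev_if == dev_if) {
		ret = conn;
		break;
	}
}
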
109 * It clears partial message state so that the transport can start sending
115 struct rds_connection *conn = cp->cp_conn; in rds_conn_path_reset()
118 &conn->c_laddr, &conn->c_faddr); in rds_conn_path_reset()
122 cp->cp_flags = 0; in rds_conn_path_reset()
133 spin_lock_init(&cp->cp_lock); in __rds_conn_path_init()
134 cp->cp_next_tx_seq = 1; in __rds_conn_path_init()
135 init_waitqueue_head(&cp->cp_waitq); in __rds_conn_path_init()
136 INIT_LIST_HEAD(&cp->cp_send_queue); in __rds_conn_path_init()
137 INIT_LIST_HEAD(&cp->cp_retrans); in __rds_conn_path_init()
139 cp->cp_conn = conn; in __rds_conn_path_init()
140 atomic_set(&cp->cp_state, RDS_CONN_DOWN); in __rds_conn_path_init()
141 cp->cp_send_gen = 0; in __rds_conn_path_init()
142 cp->cp_reconnect_jiffies = 0; in __rds_conn_path_init()
143 cp->cp_conn->c_proposed_version = RDS_PROTOCOL_VERSION; in __rds_conn_path_init()
144 INIT_DELAYED_WORK(&cp->cp_send_w, rds_send_worker); in __rds_conn_path_init()
145 INIT_DELAYED_WORK(&cp->cp_recv_w, rds_recv_worker); in __rds_conn_path_init()
146 INIT_DELAYED_WORK(&cp->cp_conn_w, rds_connect_worker); in __rds_conn_path_init()
147 INIT_WORK(&cp->cp_down_w, rds_shutdown_worker); in __rds_conn_path_init()
148 mutex_init(&cp->cp_cm_lock); in __rds_conn_path_init()
149 cp->cp_flags = 0; in __rds_conn_path_init()
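
Lines 133-149 set up one rds_conn_path: lock, sequence number, wait queue, send/retransmit lists, and one work item per activity (send, receive, connect, shutdown), all of which later run on the rds_wq workqueue. As a purely illustrative, hedged example of how such a path is driven afterwards (the real call sites live in the send and threads code, not in these matches):

/* Illustrative only: run the path's send worker now, and the connect worker
 * after the path's current reconnect backoff. */
queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
queue_delayed_work(rds_wq, &cp->cp_conn_w, cp->cp_reconnect_jiffies);
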
173 int npaths = (trans->t_mp_capable ? RDS_MPATH_WORKERS : 1); in __rds_conn_create()
178 conn->c_loopback && in __rds_conn_create()
179 conn->c_trans != &rds_loop_transport && in __rds_conn_create()
187 conn = parent->c_passive; in __rds_conn_create()
195 conn = ERR_PTR(-ENOMEM); in __rds_conn_create()
198 conn->c_path = kcalloc(npaths, sizeof(struct rds_conn_path), gfp); in __rds_conn_create()
199 if (!conn->c_path) { in __rds_conn_create()
201 conn = ERR_PTR(-ENOMEM); in __rds_conn_create()
205 INIT_HLIST_NODE(&conn->c_hash_node); in __rds_conn_create()
206 conn->c_laddr = *laddr; in __rds_conn_create()
207 conn->c_isv6 = !ipv6_addr_v4mapped(laddr); in __rds_conn_create()
208 conn->c_faddr = *faddr; in __rds_conn_create()
209 conn->c_dev_if = dev_if; in __rds_conn_create()
210 conn->c_tos = tos; in __rds_conn_create()
219 conn->c_bound_if = dev_if; in __rds_conn_create()
222 conn->c_bound_if = 0; in __rds_conn_create()
228 kfree(conn->c_path); in __rds_conn_create()
239 loop_trans = rds_trans_get_preferred(net, faddr, conn->c_dev_if); in __rds_conn_create()
242 conn->c_loopback = 1; in __rds_conn_create()
243 if (trans->t_prefer_loopback) { in __rds_conn_create()
256 kfree(conn->c_path); in __rds_conn_create()
258 conn = ERR_PTR(-EOPNOTSUPP); in __rds_conn_create()
264 conn->c_trans = trans; in __rds_conn_create()
266 init_waitqueue_head(&conn->c_hs_waitq); in __rds_conn_create()
268 __rds_conn_path_init(conn, &conn->c_path[i], in __rds_conn_create()
270 conn->c_path[i].cp_index = i; in __rds_conn_create()
274 ret = -ENETDOWN; in __rds_conn_create()
276 ret = trans->conn_alloc(conn, GFP_ATOMIC); in __rds_conn_create()
279 kfree(conn->c_path); in __rds_conn_create()
285 rdsdebug("allocated conn %p for %pI6c -> %pI6c over %s %s\n", in __rds_conn_create()
287 strnlen(trans->t_name, sizeof(trans->t_name)) ? in __rds_conn_create()
288 trans->t_name : "[unknown]", is_outgoing ? "(outgoing)" : ""); in __rds_conn_create()
300 if (parent->c_passive) { in __rds_conn_create()
301 trans->conn_free(conn->c_path[0].cp_transport_data); in __rds_conn_create()
302 kfree(conn->c_path); in __rds_conn_create()
304 conn = parent->c_passive; in __rds_conn_create()
306 parent->c_passive = conn; in __rds_conn_create()
321 cp = &conn->c_path[i]; in __rds_conn_create()
322 /* The ->conn_alloc invocation may have in __rds_conn_create()
326 if (cp->cp_transport_data) in __rds_conn_create()
327 trans->conn_free(cp->cp_transport_data); in __rds_conn_create()
329 kfree(conn->c_path); in __rds_conn_create()
333 conn->c_my_gen_num = rds_gen_num; in __rds_conn_create()
334 conn->c_peer_gen_num = 0; in __rds_conn_create()
335 hlist_add_head_rcu(&conn->c_hash_node, head); in __rds_conn_create()
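
Line 173 onwards is __rds_conn_create(), which reuses an existing connection found via rds_conn_lookup() or allocates a new one, initialises npaths conn paths, and hashes it in on line 335. The exported entry points are thin wrappers that differ only in the is_outgoing flag; the sketch below assumes the current IPv6-capable signature with a TOS argument, and the exact argument order is an assumption:

/* Hedged sketch of the public wrappers around __rds_conn_create();
 * only the 0/1 is_outgoing flag differs between them. */
struct rds_connection *rds_conn_create(struct net *net,
				       const struct in6_addr *laddr,
				       const struct in6_addr *faddr,
				       struct rds_transport *trans,
				       u8 tos, gfp_t gfp, int dev_if)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, tos, 0, dev_if);
}

struct rds_connection *rds_conn_create_outgoing(struct net *net,
						const struct in6_addr *laddr,
						const struct in6_addr *faddr,
						struct rds_transport *trans,
						u8 tos, gfp_t gfp, int dev_if)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, tos, 1, dev_if);
}
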
369 struct rds_connection *conn = cp->cp_conn; in rds_conn_shutdown()
376 * duration of the shutdown operation, else we may be in rds_conn_shutdown()
378 * handler is supposed to check for state DISCONNECTING in rds_conn_shutdown()
380 mutex_lock(&cp->cp_cm_lock); in rds_conn_shutdown()
386 "shutdown called in state %d\n", in rds_conn_shutdown()
387 atomic_read(&cp->cp_state)); in rds_conn_shutdown()
388 mutex_unlock(&cp->cp_cm_lock); in rds_conn_shutdown()
391 mutex_unlock(&cp->cp_cm_lock); in rds_conn_shutdown()
393 wait_event(cp->cp_waitq, in rds_conn_shutdown()
394 !test_bit(RDS_IN_XMIT, &cp->cp_flags)); in rds_conn_shutdown()
395 wait_event(cp->cp_waitq, in rds_conn_shutdown()
396 !test_bit(RDS_RECV_REFILL, &cp->cp_flags)); in rds_conn_shutdown()
398 conn->c_trans->conn_path_shutdown(cp); in rds_conn_shutdown()
405 /* This can happen - eg when we're in the middle of tearing in rds_conn_shutdown()
410 * Note that this also happens with rds-tcp because in rds_conn_shutdown()
417 "to state DOWN, current state " in rds_conn_shutdown()
419 atomic_read(&cp->cp_state)); in rds_conn_shutdown()
427 * conn - the reconnect is always triggered by the active peer. */ in rds_conn_shutdown()
428 cancel_delayed_work_sync(&cp->cp_conn_w); in rds_conn_shutdown()
430 if (!hlist_unhashed(&conn->c_hash_node)) { in rds_conn_shutdown()
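
The state strings on lines 386-387 and 417-419 are emitted when a compare-and-swap state transition fails: shutdown first tries to move the path from UP (or ERROR) to DISCONNECTING, quiesces transmit and receive (lines 393-396), calls the transport's conn_path_shutdown, and then tries DISCONNECTING to DOWN. A hedged sketch of the transition helper as it is usually defined in rds.h:

/* Hedged sketch: atomically move cp_state from 'old' to 'new' and report
 * whether the transition happened; a failure means something else changed
 * the state first, which is what "shutdown called in state %d" and the
 * "failed to transition to state DOWN" messages above report. */
static inline int
rds_conn_path_transition(struct rds_conn_path *cp, int old, int new)
{
	return atomic_cmpxchg(&cp->cp_state, old, new) == old;
}
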
445 if (!cp->cp_transport_data) in rds_conn_path_destroy()
449 cancel_delayed_work_sync(&cp->cp_send_w); in rds_conn_path_destroy()
450 cancel_delayed_work_sync(&cp->cp_recv_w); in rds_conn_path_destroy()
453 flush_work(&cp->cp_down_w); in rds_conn_path_destroy()
457 &cp->cp_send_queue, in rds_conn_path_destroy()
459 list_del_init(&rm->m_conn_item); in rds_conn_path_destroy()
460 BUG_ON(!list_empty(&rm->m_sock_item)); in rds_conn_path_destroy()
463 if (cp->cp_xmit_rm) in rds_conn_path_destroy()
464 rds_message_put(cp->cp_xmit_rm); in rds_conn_path_destroy()
466 WARN_ON(delayed_work_pending(&cp->cp_send_w)); in rds_conn_path_destroy()
467 WARN_ON(delayed_work_pending(&cp->cp_recv_w)); in rds_conn_path_destroy()
468 WARN_ON(delayed_work_pending(&cp->cp_conn_w)); in rds_conn_path_destroy()
469 WARN_ON(work_pending(&cp->cp_down_w)); in rds_conn_path_destroy()
471 cp->cp_conn->c_trans->conn_free(cp->cp_transport_data); in rds_conn_path_destroy()
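
Lines 457-460 are the body of the send-queue purge in rds_conn_path_destroy(); the surrounding loop head and the final message put are not among the matches, so the reconstruction below fills them in as assumptions based on the standard list_for_each_entry_safe pattern:

/* Drop every message still queued on the path. Each message must already be
 * off its socket list, hence the BUG_ON. */
list_for_each_entry_safe(rm, rtmp,
			 &cp->cp_send_queue,
			 m_conn_item) {
	list_del_init(&rm->m_conn_item);
	BUG_ON(!list_empty(&rm->m_sock_item));
	rds_message_put(rm);
}
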
478 * the conn has been shutdown that no one else is referencing the connection.
486 int npaths = (conn->c_trans->t_mp_capable ? RDS_MPATH_WORKERS : 1); in rds_conn_destroy()
488 rdsdebug("freeing conn %p for %pI4 -> " in rds_conn_destroy()
489 "%pI4\n", conn, &conn->c_laddr, in rds_conn_destroy()
490 &conn->c_faddr); in rds_conn_destroy()
494 hlist_del_init_rcu(&conn->c_hash_node); in rds_conn_destroy()
500 cp = &conn->c_path[i]; in rds_conn_destroy()
502 BUG_ON(!list_empty(&cp->cp_retrans)); in rds_conn_destroy()
512 kfree(conn->c_path); in rds_conn_destroy()
516 rds_conn_count--; in rds_conn_destroy()
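
hlist_del_init_rcu() on line 494 unhashes the connection while lookups may still be walking the bucket under rcu_read_lock(), so the paths can only be torn down after a grace period. The usual shape of that sequence is sketched below; the lock name and the synchronize_rcu() call are assumptions, since they are not among the matched lines:

/* Hedged sketch: unhash under the connection hash lock, then wait for all
 * RCU readers before freeing per-path state. */
spin_lock_irqsave(&rds_conn_lock, flags);
hlist_del_init_rcu(&conn->c_hash_node);
spin_unlock_irqrestore(&rds_conn_lock, flags);
synchronize_rcu();	/* no lookup can return this conn any more */
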
561 if (!isv6 && conn->c_isv6) in rds_conn_message_info_cmn()
564 npaths = (conn->c_trans->t_mp_capable ? in rds_conn_message_info_cmn()
568 cp = &conn->c_path[j]; in rds_conn_message_info_cmn()
570 list = &cp->cp_send_queue; in rds_conn_message_info_cmn()
572 list = &cp->cp_retrans; in rds_conn_message_info_cmn()
574 spin_lock_irqsave(&cp->cp_lock, flags); in rds_conn_message_info_cmn()
580 __rds_inc_msg_cp(&rm->m_inc, in rds_conn_message_info_cmn()
582 &conn->c_laddr, in rds_conn_message_info_cmn()
583 &conn->c_faddr, in rds_conn_message_info_cmn()
587 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_conn_message_info_cmn()
593 lens->nr = total; in rds_conn_message_info_cmn()
595 lens->each = sizeof(struct rds6_info_message); in rds_conn_message_info_cmn()
597 lens->each = sizeof(struct rds_info_message); in rds_conn_message_info_cmn()
665 lens->nr = 0; in rds_for_each_conn_info()
666 lens->each = item_len; in rds_for_each_conn_info()
681 len -= item_len; in rds_for_each_conn_info()
683 lens->nr++; in rds_for_each_conn_info()
703 lens->nr = 0; in rds_walk_conn_path_info()
704 lens->each = item_len; in rds_walk_conn_path_info()
719 cp = conn->c_path; in rds_walk_conn_path_info()
731 len -= item_len; in rds_walk_conn_path_info()
733 lens->nr++; in rds_walk_conn_path_info()
742 struct rds_connection *conn = cp->cp_conn; in rds_conn_info_visitor()
744 if (conn->c_isv6) in rds_conn_info_visitor()
747 cinfo->next_tx_seq = cp->cp_next_tx_seq; in rds_conn_info_visitor()
748 cinfo->next_rx_seq = cp->cp_next_rx_seq; in rds_conn_info_visitor()
749 cinfo->laddr = conn->c_laddr.s6_addr32[3]; in rds_conn_info_visitor()
750 cinfo->faddr = conn->c_faddr.s6_addr32[3]; in rds_conn_info_visitor()
751 cinfo->tos = conn->c_tos; in rds_conn_info_visitor()
752 strncpy(cinfo->transport, conn->c_trans->t_name, in rds_conn_info_visitor()
753 sizeof(cinfo->transport)); in rds_conn_info_visitor()
754 cinfo->flags = 0; in rds_conn_info_visitor()
756 rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags), in rds_conn_info_visitor()
758 /* XXX Future: return the state rather than these funky bits */ in rds_conn_info_visitor()
759 rds_conn_info_set(cinfo->flags, in rds_conn_info_visitor()
760 atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING, in rds_conn_info_visitor()
762 rds_conn_info_set(cinfo->flags, in rds_conn_info_visitor()
763 atomic_read(&cp->cp_state) == RDS_CONN_UP, in rds_conn_info_visitor()
772 struct rds_connection *conn = cp->cp_conn; in rds6_conn_info_visitor()
774 cinfo6->next_tx_seq = cp->cp_next_tx_seq; in rds6_conn_info_visitor()
775 cinfo6->next_rx_seq = cp->cp_next_rx_seq; in rds6_conn_info_visitor()
776 cinfo6->laddr = conn->c_laddr; in rds6_conn_info_visitor()
777 cinfo6->faddr = conn->c_faddr; in rds6_conn_info_visitor()
778 strncpy(cinfo6->transport, conn->c_trans->t_name, in rds6_conn_info_visitor()
779 sizeof(cinfo6->transport)); in rds6_conn_info_visitor()
780 cinfo6->flags = 0; in rds6_conn_info_visitor()
782 rds_conn_info_set(cinfo6->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags), in rds6_conn_info_visitor()
784 /* XXX Future: return the state rather than these funky bits */ in rds6_conn_info_visitor()
785 rds_conn_info_set(cinfo6->flags, in rds6_conn_info_visitor()
786 atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING, in rds6_conn_info_visitor()
788 rds_conn_info_set(cinfo6->flags, in rds6_conn_info_visitor()
789 atomic_read(&cp->cp_state) == RDS_CONN_UP, in rds6_conn_info_visitor()
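
Both visitors report connection state to userspace by translating cp_state and the RDS_IN_XMIT bit into RDS_INFO_CONNECTION_FLAG_* bits via rds_conn_info_set(). That macro is likely defined in connection.c roughly as below; the token-pasting form is an assumption:

/* Hedged sketch: set the matching RDS_INFO_CONNECTION_FLAG_* bit when the
 * test is true, e.g. rds_conn_info_set(flags, cond, SENDING) sets
 * RDS_INFO_CONNECTION_FLAG_SENDING. */
#define rds_conn_info_set(var, test, suffix) do {		\
	if (test)						\
		var |= RDS_INFO_CONNECTION_FLAG_##suffix;	\
} while (0)
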
837 return -ENOMEM; in rds_conn_init()
883 atomic_set(&cp->cp_state, RDS_CONN_ERROR); in rds_conn_path_drop()
886 if (!destroy && rds_destroy_pending(cp->cp_conn)) { in rds_conn_path_drop()
890 queue_work(rds_wq, &cp->cp_down_w); in rds_conn_path_drop()
897 WARN_ON(conn->c_trans->t_mp_capable); in rds_conn_drop()
898 rds_conn_path_drop(&conn->c_path[0], false); in rds_conn_drop()
904 * delayed reconnect however - in this case we should not interfere.
909 if (rds_destroy_pending(cp->cp_conn)) { in rds_conn_path_connect_if_down()
914 !test_and_set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags)) in rds_conn_path_connect_if_down()
915 queue_delayed_work(rds_wq, &cp->cp_conn_w, 0); in rds_conn_path_connect_if_down()
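
Lines 914-915 only queue the connect worker for a path that is actually down and not already being reconnected; the state check preceding the matched test_and_set_bit is an assumption reconstructed below:

/* Hedged reconstruction of the guard: kick cp_conn_w only when the path is
 * DOWN, and only if no reconnect is already pending. */
if (rds_conn_path_state(cp) == RDS_CONN_DOWN &&
    !test_and_set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags))
	queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
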
927 rds_conn_path_connect_if_down(&conn->c_path[i]); in rds_check_all_paths()
928 } while (++i < conn->c_npaths); in rds_check_all_paths()
933 WARN_ON(conn->c_trans->t_mp_capable); in rds_conn_connect_if_down()
934 rds_conn_path_connect_if_down(&conn->c_path[0]); in rds_conn_connect_if_down()