Lines Matching refs:conn (net/rds/send.c identifier cross-reference)
Each entry gives the line number in send.c, the matched source line, and the enclosing function; "argument" and "local" mark entries where conn is declared rather than merely used.
64 void rds_send_reset(struct rds_connection *conn) in rds_send_reset() argument
69 if (conn->c_xmit_rm) { in rds_send_reset()
70 rm = conn->c_xmit_rm; in rds_send_reset()
71 conn->c_xmit_rm = NULL; in rds_send_reset()
80 conn->c_xmit_sg = 0; in rds_send_reset()
81 conn->c_xmit_hdr_off = 0; in rds_send_reset()
82 conn->c_xmit_data_off = 0; in rds_send_reset()
83 conn->c_xmit_atomic_sent = 0; in rds_send_reset()
84 conn->c_xmit_rdma_sent = 0; in rds_send_reset()
85 conn->c_xmit_data_sent = 0; in rds_send_reset()
87 conn->c_map_queued = 0; in rds_send_reset()
89 conn->c_unacked_packets = rds_sysctl_max_unacked_packets; in rds_send_reset()
90 conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes; in rds_send_reset()
93 spin_lock_irqsave(&conn->c_lock, flags); in rds_send_reset()
94 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) { in rds_send_reset()
98 list_splice_init(&conn->c_retrans, &conn->c_send_queue); in rds_send_reset()
99 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_reset()
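Read together, the hits at lines 64-99 trace the connection-reset path: drop any message caught mid-transmit, rewind the per-connection transmit cursor, refill the unacked budget from the sysctls, and requeue everything on c_retrans for retransmission. A sketch of that shape, reconstructed around the matched lines; the statements that did not match refs:conn (the rds_message_* calls and the flag bits set inside the loop) are filled in from context and should be read as assumptions:

void rds_send_reset(struct rds_connection *conn)
{
        struct rds_message *rm, *tmp;
        unsigned long flags;

        /* Drop the message currently being transmitted, if any. */
        if (conn->c_xmit_rm) {
                rm = conn->c_xmit_rm;
                conn->c_xmit_rm = NULL;
                rds_message_unmapped(rm);       /* assumed from context */
                rds_message_put(rm);
        }

        /* Rewind the transmit cursor to the start of the next message. */
        conn->c_xmit_sg = 0;
        conn->c_xmit_hdr_off = 0;
        conn->c_xmit_data_off = 0;
        conn->c_xmit_atomic_sent = 0;
        conn->c_xmit_rdma_sent = 0;
        conn->c_xmit_data_sent = 0;

        conn->c_map_queued = 0;

        /* Restore the full unacked budget from the sysctl limits. */
        conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
        conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;

        /* Mark unacked messages as retransmissions and splice them back
         * onto the send queue, all under c_lock. */
        spin_lock_irqsave(&conn->c_lock, flags);
        list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
                set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);    /* assumed */
                set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);   /* assumed */
        }
        list_splice_init(&conn->c_retrans, &conn->c_send_queue);
        spin_unlock_irqrestore(&conn->c_lock, flags);
}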
102 static int acquire_in_xmit(struct rds_connection *conn) in acquire_in_xmit() argument
104 return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0; in acquire_in_xmit()
107 static void release_in_xmit(struct rds_connection *conn) in release_in_xmit() argument
109 clear_bit(RDS_IN_XMIT, &conn->c_flags); in release_in_xmit()
117 if (waitqueue_active(&conn->c_waitq)) in release_in_xmit()
118 wake_up_all(&conn->c_waitq); in release_in_xmit()
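These two helpers form a hand-rolled trylock: RDS_IN_XMIT in c_flags admits a single sender, and the releaser wakes anyone parked on c_waitq. A sketch assuming the memory barrier this pattern requires between the clear and the waitqueue peek (the barrier line itself is not in the listing):

static int acquire_in_xmit(struct rds_connection *conn)
{
        /* Only the caller that flips the bit 0 -> 1 may transmit. */
        return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
}

static void release_in_xmit(struct rds_connection *conn)
{
        clear_bit(RDS_IN_XMIT, &conn->c_flags);
        smp_mb__after_atomic(); /* assumed: order the clear before the peek */

        /* waitqueue_active() is a cheap unlocked check; waiters are rare,
         * so the full wake_up_all() is skipped in the common case. */
        if (waitqueue_active(&conn->c_waitq))
                wake_up_all(&conn->c_waitq);
}

The bit acts as a trylock rather than a mutex so the transmit path can be entered from contexts that must not sleep; contenders simply back off and let the current holder drain the queue.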
135 int rds_send_xmit(struct rds_connection *conn) in rds_send_xmit() argument
153 if (!acquire_in_xmit(conn)) { in rds_send_xmit()
163 if (!rds_conn_up(conn)) { in rds_send_xmit()
164 release_in_xmit(conn); in rds_send_xmit()
169 if (conn->c_trans->xmit_prepare) in rds_send_xmit()
170 conn->c_trans->xmit_prepare(conn); in rds_send_xmit()
178 rm = conn->c_xmit_rm; in rds_send_xmit()
184 if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) { in rds_send_xmit()
185 rm = rds_cong_update_alloc(conn); in rds_send_xmit()
192 conn->c_xmit_rm = rm; in rds_send_xmit()
205 spin_lock_irqsave(&conn->c_lock, flags); in rds_send_xmit()
207 if (!list_empty(&conn->c_send_queue)) { in rds_send_xmit()
208 rm = list_entry(conn->c_send_queue.next, in rds_send_xmit()
217 list_move_tail(&rm->m_conn_item, &conn->c_retrans); in rds_send_xmit()
220 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_xmit()
234 spin_lock_irqsave(&conn->c_lock, flags); in rds_send_xmit()
237 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_xmit()
243 if (conn->c_unacked_packets == 0 || in rds_send_xmit()
244 conn->c_unacked_bytes < len) { in rds_send_xmit()
247 conn->c_unacked_packets = rds_sysctl_max_unacked_packets; in rds_send_xmit()
248 conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes; in rds_send_xmit()
251 conn->c_unacked_bytes -= len; in rds_send_xmit()
252 conn->c_unacked_packets--; in rds_send_xmit()
255 conn->c_xmit_rm = rm; in rds_send_xmit()
259 if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) { in rds_send_xmit()
261 ret = conn->c_trans->xmit_rdma(conn, &rm->rdma); in rds_send_xmit()
264 conn->c_xmit_rdma_sent = 1; in rds_send_xmit()
271 if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) { in rds_send_xmit()
273 ret = conn->c_trans->xmit_atomic(conn, &rm->atomic); in rds_send_xmit()
276 conn->c_xmit_atomic_sent = 1; in rds_send_xmit()
305 if (rm->data.op_active && !conn->c_xmit_data_sent) { in rds_send_xmit()
307 ret = conn->c_trans->xmit(conn, rm, in rds_send_xmit()
308 conn->c_xmit_hdr_off, in rds_send_xmit()
309 conn->c_xmit_sg, in rds_send_xmit()
310 conn->c_xmit_data_off); in rds_send_xmit()
314 if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) { in rds_send_xmit()
317 conn->c_xmit_hdr_off); in rds_send_xmit()
318 conn->c_xmit_hdr_off += tmp; in rds_send_xmit()
322 sg = &rm->data.op_sg[conn->c_xmit_sg]; in rds_send_xmit()
325 conn->c_xmit_data_off); in rds_send_xmit()
326 conn->c_xmit_data_off += tmp; in rds_send_xmit()
328 if (conn->c_xmit_data_off == sg->length) { in rds_send_xmit()
329 conn->c_xmit_data_off = 0; in rds_send_xmit()
331 conn->c_xmit_sg++; in rds_send_xmit()
333 conn->c_xmit_sg == rm->data.op_nents); in rds_send_xmit()
337 if (conn->c_xmit_hdr_off == sizeof(struct rds_header) && in rds_send_xmit()
338 (conn->c_xmit_sg == rm->data.op_nents)) in rds_send_xmit()
339 conn->c_xmit_data_sent = 1; in rds_send_xmit()
347 if (!rm->data.op_active || conn->c_xmit_data_sent) { in rds_send_xmit()
348 conn->c_xmit_rm = NULL; in rds_send_xmit()
349 conn->c_xmit_sg = 0; in rds_send_xmit()
350 conn->c_xmit_hdr_off = 0; in rds_send_xmit()
351 conn->c_xmit_data_off = 0; in rds_send_xmit()
352 conn->c_xmit_rdma_sent = 0; in rds_send_xmit()
353 conn->c_xmit_atomic_sent = 0; in rds_send_xmit()
354 conn->c_xmit_data_sent = 0; in rds_send_xmit()
360 if (conn->c_trans->xmit_complete) in rds_send_xmit()
361 conn->c_trans->xmit_complete(conn); in rds_send_xmit()
363 release_in_xmit(conn); in rds_send_xmit()
386 if (!list_empty(&conn->c_send_queue)) { in rds_send_xmit()
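The hits between lines 135 and 386 cover the whole fast-path send loop in rds_send_xmit(): take RDS_IN_XMIT, then repeatedly pick the in-progress message (c_xmit_rm), a queued congestion-map update, or the head of c_send_queue, push its rdma/atomic/data ops through the transport, and advance the header/sg/data cursors until the message completes. A condensed skeleton stitched from the matched lines; error handling, statistics, and the byte-by-byte cursor advance are abbreviated into comments, and identifiers not in the listing are assumptions:

int rds_send_xmit(struct rds_connection *conn)
{
        struct rds_message *rm;
        unsigned long flags;
        unsigned int len;
        int ret = 0;

        /* Single-sender gate: only one CPU transmits per connection. */
        if (!acquire_in_xmit(conn)) {
                ret = -ENOMEM;          /* assumed errno */
                goto out;
        }

        /* Checked after taking RDS_IN_XMIT to avoid racing shutdown. */
        if (!rds_conn_up(conn)) {
                release_in_xmit(conn);
                goto out;
        }

        if (conn->c_trans->xmit_prepare)
                conn->c_trans->xmit_prepare(conn);

        while (1) {
                /* Resume a partially transmitted message first. */
                rm = conn->c_xmit_rm;

                /* A queued congestion-map update outranks the send queue. */
                if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
                        rm = rds_cong_update_alloc(conn);
                        conn->c_xmit_rm = rm;
                }

                /* Otherwise pop the send queue; the message parks on
                 * c_retrans until the peer acks it. */
                if (!rm) {
                        spin_lock_irqsave(&conn->c_lock, flags);
                        if (!list_empty(&conn->c_send_queue)) {
                                rm = list_entry(conn->c_send_queue.next,
                                                struct rds_message,
                                                m_conn_item);
                                rds_message_addref(rm); /* assumed */
                                list_move_tail(&rm->m_conn_item,
                                               &conn->c_retrans);
                        }
                        spin_unlock_irqrestore(&conn->c_lock, flags);
                        if (!rm)
                                break;

                        /* Request an ack when the unacked budget runs out,
                         * then refill it from the sysctls. */
                        len = ntohl(rm->m_inc.i_hdr.h_len);
                        if (conn->c_unacked_packets == 0 ||
                            conn->c_unacked_bytes < len) {
                                set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
                                conn->c_unacked_packets =
                                        rds_sysctl_max_unacked_packets;
                                conn->c_unacked_bytes =
                                        rds_sysctl_max_unacked_bytes;
                        } else {
                                conn->c_unacked_bytes -= len;
                                conn->c_unacked_packets--;
                        }

                        conn->c_xmit_rm = rm;
                }

                /* Each op goes through the transport exactly once; the
                 * c_xmit_*_sent flags keep restarts from resending it. */
                if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
                        ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
                        if (ret)
                                break;
                        conn->c_xmit_rdma_sent = 1;
                }

                if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
                        ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
                        if (ret)
                                break;
                        conn->c_xmit_atomic_sent = 1;
                }

                if (rm->data.op_active && !conn->c_xmit_data_sent) {
                        ret = conn->c_trans->xmit(conn, rm,
                                                  conn->c_xmit_hdr_off,
                                                  conn->c_xmit_sg,
                                                  conn->c_xmit_data_off);
                        if (ret <= 0)
                                break;
                        /* Lines 314-339: spend ret bytes advancing
                         * c_xmit_hdr_off, then c_xmit_data_off/c_xmit_sg
                         * across op_sg; once the header and all op_nents
                         * entries are consumed, c_xmit_data_sent = 1. */
                }

                /* Message finished: clear the cursor for the next one. */
                if (!rm->data.op_active || conn->c_xmit_data_sent) {
                        conn->c_xmit_rm = NULL;
                        conn->c_xmit_sg = 0;
                        conn->c_xmit_hdr_off = 0;
                        conn->c_xmit_data_off = 0;
                        conn->c_xmit_rdma_sent = 0;
                        conn->c_xmit_atomic_sent = 0;
                        conn->c_xmit_data_sent = 0;
                        rds_message_put(rm);
                }
        }

        if (conn->c_trans->xmit_complete)
                conn->c_trans->xmit_complete(conn);

        release_in_xmit(conn);

        /* Line 386: the real function re-checks c_send_queue after the
         * release and reschedules itself if work arrived meanwhile,
         * closing the race with senders who saw RDS_IN_XMIT still set. */
out:
        return ret;
}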
524 struct rds_message *rds_send_get_message(struct rds_connection *conn, in rds_send_get_message() argument
530 spin_lock_irqsave(&conn->c_lock, flags); in rds_send_get_message()
532 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) { in rds_send_get_message()
540 list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) { in rds_send_get_message()
549 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_get_message()
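rds_send_get_message() is a locked lookup: under c_lock it scans c_retrans first (messages in flight) and then c_send_queue. A sketch assuming the lookup key is the message's rdma op pointer and that a reference is taken before returning; neither detail is in the matched lines:

struct rds_message *rds_send_get_message(struct rds_connection *conn,
                                         struct rm_rdma_op *op)
{
        struct rds_message *rm, *tmp, *found = NULL;
        unsigned long flags;

        spin_lock_irqsave(&conn->c_lock, flags);

        /* Messages already handed to the transport sit on c_retrans. */
        list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
                if (&rm->rdma == op) {          /* assumed match predicate */
                        atomic_inc(&rm->m_refcount);
                        found = rm;
                        goto out;
                }
        }

        /* Fall back to messages still waiting on the send queue. */
        list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
                if (&rm->rdma == op) {
                        atomic_inc(&rm->m_refcount);
                        found = rm;
                        break;
                }
        }

out:
        spin_unlock_irqrestore(&conn->c_lock, flags);
        return found;
}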
645 void rds_send_drop_acked(struct rds_connection *conn, u64 ack, in rds_send_drop_acked() argument
652 spin_lock_irqsave(&conn->c_lock, flags); in rds_send_drop_acked()
654 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) { in rds_send_drop_acked()
666 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_drop_acked()
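rds_send_drop_acked() retires everything on c_retrans that an incoming ack covers. A sketch assuming the usual private-list pattern, with the per-message body (the is_acked test and the list_move) reconstructed from context:

void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
                         is_acked_func is_acked)
{
        struct rds_message *rm, *tmp;
        unsigned long flags;
        LIST_HEAD(list);

        spin_lock_irqsave(&conn->c_lock, flags);

        /* c_retrans is kept in sequence order, so scanning can stop at
         * the first message the ack does not cover. */
        list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
                if (!rds_send_is_acked(rm, ack, is_acked))      /* assumed */
                        break;
                list_move(&rm->m_conn_item, &list);
                clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
        }

        spin_unlock_irqrestore(&conn->c_lock, flags);

        /* Complete the acked messages outside c_lock. */
        rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);    /* assumed */
}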
676 struct rds_connection *conn; in rds_send_drop_to() local
704 conn = rm->m_inc.i_conn; in rds_send_drop_to()
706 spin_lock_irqsave(&conn->c_lock, flags); in rds_send_drop_to()
713 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_drop_to()
717 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_drop_to()
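The rds_send_drop_to() hits show only the per-message locking pattern: each message carries a pointer to its connection (m_inc.i_conn), and the socket-side drop path takes that connection's c_lock to unlink it. A sketch of that fragment; everything outside the lock/unlock pairs, including the RDS_MSG_ON_CONN race handling, is reconstructed and should be treated as an assumption:

        /* For each message already pulled off the socket's send list: */
        conn = rm->m_inc.i_conn;

        spin_lock_irqsave(&conn->c_lock, flags);
        /* An ack may have unlinked rm from the connection first; the
         * flag is cleared exactly once, so whoever clears it owns the
         * unlink (assumed from context). */
        if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
                spin_unlock_irqrestore(&conn->c_lock, flags);
                return;         /* lost the race; nothing to unlink */
        }
        list_del_init(&rm->m_conn_item);
        spin_unlock_irqrestore(&conn->c_lock, flags);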
751 static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn, in rds_send_queue_rm() argument
794 rm->m_inc.i_conn = conn; in rds_send_queue_rm()
797 spin_lock(&conn->c_lock); in rds_send_queue_rm()
798 rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++); in rds_send_queue_rm()
799 list_add_tail(&rm->m_conn_item, &conn->c_send_queue); in rds_send_queue_rm()
801 spin_unlock(&conn->c_lock); in rds_send_queue_rm()
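The rds_send_queue_rm() hits show how a message is bound to its connection: the back-pointer is set, then the sequence number is assigned and the message enqueued inside a single c_lock hold. A sketch of just that matched region (the socket send-buffer accounting earlier in the function is omitted; the addref and flag set are assumptions):

        rm->m_inc.i_conn = conn;
        rds_message_addref(rm);                 /* assumed */

        spin_lock(&conn->c_lock);
        /* Sequence assignment and enqueue are atomic under c_lock, so
         * c_send_queue is always in h_sequence order. */
        rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
        list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
        set_bit(RDS_MSG_ON_CONN, &rm->m_flags); /* assumed */
        spin_unlock(&conn->c_lock);

Keeping the assignment and the enqueue under one lock hold is what lets rds_send_drop_acked() stop scanning c_retrans at the first message an ack does not cover.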
929 struct rds_connection *conn; in rds_sendmsg() local
993 conn = rs->rs_conn; in rds_sendmsg()
995 conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr, in rds_sendmsg()
998 if (IS_ERR(conn)) { in rds_sendmsg()
999 ret = PTR_ERR(conn); in rds_sendmsg()
1002 rs->rs_conn = conn; in rds_sendmsg()
1010 if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) { in rds_sendmsg()
1012 &rm->rdma, conn->c_trans->xmit_rdma); in rds_sendmsg()
1017 if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) { in rds_sendmsg()
1019 &rm->atomic, conn->c_trans->xmit_atomic); in rds_sendmsg()
1024 rds_conn_connect_if_down(conn); in rds_sendmsg()
1026 ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); in rds_sendmsg()
1032 while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port, in rds_sendmsg()
1046 rds_send_queue_rm(rs, conn, rm, in rds_sendmsg()
1067 if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags)) in rds_sendmsg()
1068 rds_send_xmit(conn); in rds_sendmsg()
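The rds_sendmsg() hits sketch the per-send connection handling: reuse the socket's cached connection or create an outgoing one, reject ops the transport cannot carry, wait out congestion, queue, and kick the transmit path. An excerpt covering only the matched conn lines; locals such as daddr, dport, nonblock, queued, and the surrounding loop body come from the rest of the function and are assumptions here:

        /* Reuse the socket's cached connection when it still points at
         * this destination; otherwise create (or look up) an outgoing
         * one and cache it. */
        if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)       /* assumed */
                conn = rs->rs_conn;
        else {
                conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
                                                rs->rs_transport,
                                                sock->sk->sk_allocation);
                if (IS_ERR(conn)) {
                        ret = PTR_ERR(conn);
                        goto out;
                }
                rs->rs_conn = conn;
        }

        /* Refuse RDMA/atomic ops the bound transport cannot transmit. */
        if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
                ret = -EOPNOTSUPP;      /* assumed errno */
                goto out;
        }
        if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
                ret = -EOPNOTSUPP;
                goto out;
        }

        rds_conn_connect_if_down(conn);

        /* Respect the peer's congestion map before queueing. */
        ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
        if (ret)
                goto out;

        /* Queue the message; when the send buffer is full, fail with
         * -EAGAIN if nonblocking, else sleep and retry (lines 1032-1046,
         * wait loop elided). */
        while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
                                  dport, &queued)) {
                if (nonblock) {
                        ret = -EAGAIN;
                        goto out;
                }
                /* ... wait_event_interruptible_timeout(), then the retry
                 * at line 1046 ... */
        }

        /* Kick transmit unless a prior sender already found it full. */
        if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
                rds_send_xmit(conn);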
1089 rds_send_pong(struct rds_connection *conn, __be16 dport) in rds_send_pong() argument
1101 rm->m_daddr = conn->c_faddr; in rds_send_pong()
1104 rds_conn_connect_if_down(conn); in rds_send_pong()
1106 ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL); in rds_send_pong()
1110 spin_lock_irqsave(&conn->c_lock, flags); in rds_send_pong()
1111 list_add_tail(&rm->m_conn_item, &conn->c_send_queue); in rds_send_pong()
1114 rm->m_inc.i_conn = conn; in rds_send_pong()
1117 conn->c_next_tx_seq); in rds_send_pong()
1118 conn->c_next_tx_seq++; in rds_send_pong()
1119 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_pong()
1124 if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags)) in rds_send_pong()
1125 queue_delayed_work(rds_wq, &conn->c_send_w, 0); in rds_send_pong()
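rds_send_pong() is the same queueing dance done by hand for a zero-length reply: allocate, aim at the peer, wait on congestion, then assign a sequence number and enqueue under c_lock, deferring transmission to the send worker. A sketch built around the matched lines; the allocation, flag, and stats calls are filled in from context and should be read as assumptions:

int rds_send_pong(struct rds_connection *conn, __be16 dport)
{
        struct rds_message *rm;
        unsigned long flags;
        int ret = 0;

        rm = rds_message_alloc(0, GFP_ATOMIC);  /* assumed */
        if (!rm)
                return -ENOMEM;

        rm->m_daddr = conn->c_faddr;    /* reply goes back to the peer */
        rm->data.op_active = 1;         /* assumed */

        rds_conn_connect_if_down(conn);

        /* A pong obeys congestion state too; block unconditionally. */
        ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
        if (ret)
                goto out;

        spin_lock_irqsave(&conn->c_lock, flags);
        list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
        set_bit(RDS_MSG_ON_CONN, &rm->m_flags); /* assumed */
        rds_message_addref(rm);                 /* assumed */
        rm->m_inc.i_conn = conn;

        /* Source port 0 marks this message as a pong. */
        rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
                                    conn->c_next_tx_seq);
        conn->c_next_tx_seq++;
        spin_unlock_irqrestore(&conn->c_lock, flags);

        /* Defer to the send worker rather than transmitting inline. */
        if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
                queue_delayed_work(rds_wq, &conn->c_send_w, 0);

        rds_message_put(rm);
        return 0;

out:
        rds_message_put(rm);
        return ret;
}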