/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/in.h>
#include <net/tcp.h>

#include "rds.h"
#include "tcp.h"

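/* Enable TCP keepalive on a newly accepted socket so that a dead peer
 * is eventually detected even when no RDS traffic is flowing.
 */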
int rds_tcp_keepalive(struct socket *sock)
{
	/* values below based on xs_udp_default_timeout */
	int keepidle = 5; /* send a probe 'keepidle' secs after last data */
	int keepcnt = 5; /* number of unack'ed probes before declaring dead */
	int keepalive = 1;
	int ret = 0;

	ret = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
				(char *)&keepalive, sizeof(keepalive));
	if (ret < 0)
		goto bail;

	ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPCNT,
				(char *)&keepcnt, sizeof(keepcnt));
	if (ret < 0)
		goto bail;

	ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPIDLE,
				(char *)&keepidle, sizeof(keepidle));
	if (ret < 0)
		goto bail;

	/* KEEPINTVL is the interval between successive probes. We follow
	 * the model in xs_tcp_finish_connecting() and re-use keepidle.
	 */
	ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPINTVL,
				(char *)&keepidle, sizeof(keepidle));
bail:
	return ret;
}

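/* Accept one pending connection on the listen socket, enable keepalive
 * and socket tuning, and wire the new socket up to the rds_connection
 * for its address pair.
 */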
int rds_tcp_accept_one(struct socket *sock)
{
	struct socket *new_sock = NULL;
	struct rds_connection *conn;
	int ret;
	struct inet_sock *inet;
	struct rds_tcp_connection *rs_tcp;

	ret = sock_create_lite(sock->sk->sk_family,
			       sock->sk->sk_type, sock->sk->sk_protocol,
			       &new_sock);
	if (ret)
		goto out;

	new_sock->type = sock->type;
	new_sock->ops = sock->ops;
	ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
	if (ret < 0)
		goto out;

	ret = rds_tcp_keepalive(new_sock);
	if (ret < 0)
		goto out;

	rds_tcp_tune(new_sock);

	inet = inet_sk(new_sock->sk);

	rdsdebug("accepted tcp %pI4:%u -> %pI4:%u\n",
		 &inet->inet_saddr, ntohs(inet->inet_sport),
		 &inet->inet_daddr, ntohs(inet->inet_dport));

	conn = rds_conn_create(sock_net(sock->sk),
			       inet->inet_saddr, inet->inet_daddr,
			       &rds_tcp_transport, GFP_KERNEL);
	if (IS_ERR(conn)) {
		ret = PTR_ERR(conn);
		goto out;
	}
	/* An incoming SYN request came in, and TCP just accepted it.
	 *
	 * If the client reboots, this conn will need to be cleaned up.
	 * rds_tcp_state_change() will do that cleanup
	 */
	rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data;
	rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING);
	if (rs_tcp->t_sock) {
		/* Need to resolve a duelling SYN between peers.
		 * We have an outstanding SYN to this peer, which may
		 * potentially have transitioned to the RDS_CONN_UP state,
		 * so we must quiesce any send threads before resetting
		 * c_transport_data.
		 */
		wait_event(conn->c_waitq,
			   !test_bit(RDS_IN_XMIT, &conn->c_flags));
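		/* Break the tie by address: the connection initiated by the
		 * host with the numerically smaller IP address is kept, the
		 * other one is torn down.
		 */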
		if (ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) {
			struct sock *nsk = new_sock->sk;

			nsk->sk_user_data = NULL;
			nsk->sk_prot->disconnect(nsk, 0);
			tcp_done(nsk);
			new_sock = NULL;
			ret = 0;
			goto out;
		} else if (rs_tcp->t_sock) {
			rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
			conn->c_outgoing = 0;
		}
	}
	rds_tcp_set_callbacks(new_sock, conn);
	rds_connect_complete(conn); /* marks RDS_CONN_UP */
	new_sock = NULL;
	ret = 0;

out:
	if (new_sock)
		sock_release(new_sock);
	return ret;
}

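/* data_ready callback installed on the listening socket.  The original
 * callback was stashed in sk_user_data by rds_tcp_listen_init() and is
 * chained at the end.
 */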
void rds_tcp_listen_data_ready(struct sock *sk)
{
	void (*ready)(struct sock *sk);

	rdsdebug("listen data ready sk %p\n", sk);

	read_lock(&sk->sk_callback_lock);
	ready = sk->sk_user_data;
	if (!ready) { /* check for teardown race */
		ready = sk->sk_data_ready;
		goto out;
	}

	/*
	 * ->sk_data_ready is also called for a newly established child socket
	 * before it has been accepted and the accepter has set up their
	 * data_ready.. we only want to queue listen work for our listening
	 * socket
	 */
	if (sk->sk_state == TCP_LISTEN)
		rds_tcp_accept_work(sk);

out:
	read_unlock(&sk->sk_callback_lock);
	ready(sk);
}

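/* Create a kernel TCP socket listening on RDS_TCP_PORT on any local
 * address and install rds_tcp_listen_data_ready() as its data_ready
 * callback.  Returns the listen socket, or NULL on failure.
 */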
struct socket *rds_tcp_listen_init(struct net *net)
{
	struct sockaddr_in sin;
	struct socket *sock = NULL;
	int ret;

	ret = sock_create_kern(net, PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (ret < 0)
		goto out;

	sock->sk->sk_reuse = SK_CAN_REUSE;
	rds_tcp_nonagle(sock);

	write_lock_bh(&sock->sk->sk_callback_lock);
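	/* stash the original data_ready so it can be restored and chained */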
	sock->sk->sk_user_data = sock->sk->sk_data_ready;
	sock->sk->sk_data_ready = rds_tcp_listen_data_ready;
	write_unlock_bh(&sock->sk->sk_callback_lock);

	sin.sin_family = PF_INET;
	sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY);
	sin.sin_port = (__force u16)htons(RDS_TCP_PORT);

	ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
	if (ret < 0)
		goto out;

	ret = sock->ops->listen(sock, 64);
	if (ret < 0)
		goto out;

	return sock;
out:
	if (sock)
		sock_release(sock);
	return NULL;
}

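/* Undo rds_tcp_listen_init(): restore the original data_ready callback,
 * wait for queued accept work to finish, and release the listen socket.
 */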
void rds_tcp_listen_stop(struct socket *sock)
{
	struct sock *sk;

	if (!sock)
		return;

	sk = sock->sk;

	/* serialize with and prevent further callbacks */
	lock_sock(sk);
	write_lock_bh(&sk->sk_callback_lock);
	if (sk->sk_user_data) {
		sk->sk_data_ready = sk->sk_user_data;
		sk->sk_user_data = NULL;
	}
	write_unlock_bh(&sk->sk_callback_lock);
	release_sock(sk);

	/* wait for accepts to stop and close the socket */
	flush_workqueue(rds_wq);
	sock_release(sock);
}