/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/inet_hashtables.h>

#include "rds.h"
#include "loop.h"

#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)

/* converting this to RCU is a chore for another day.. */
static DEFINE_SPINLOCK(rds_conn_lock);
static unsigned long rds_conn_count;
static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
static struct kmem_cache *rds_conn_slab;

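/* Hash the local/remote address pair to pick a bucket in rds_conn_hash. */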
static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
{
	/* Pass NULL, don't need struct net for hash */
	unsigned long hash = inet_ehashfn(NULL,
					  be32_to_cpu(laddr), 0,
					  be32_to_cpu(faddr), 0);
	return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}

#define rds_conn_info_set(var, test, suffix) do {		\
	if (test)						\
		var |= RDS_INFO_CONNECTION_FLAG_##suffix;	\
} while (0)

/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
					      __be32 laddr, __be32 faddr,
					      struct rds_transport *trans)
{
	struct rds_connection *conn, *ret = NULL;

	hlist_for_each_entry_rcu(conn, head, c_hash_node) {
		if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
				conn->c_trans == trans) {
			ret = conn;
			break;
		}
	}
	rdsdebug("returning conn %p for %pI4 -> %pI4\n", ret,
		 &laddr, &faddr);
	return ret;
}

/*
 * This is called by transports as they're bringing down a connection.
 * It clears partial message state so that the transport can start sending
 * and receiving over this connection again in the future.  It is up to
 * the transport to have serialized this call with its send and recv.
 */
static void rds_conn_reset(struct rds_connection *conn)
{
	rdsdebug("connection %pI4 to %pI4 reset\n",
	  &conn->c_laddr, &conn->c_faddr);

	rds_stats_inc(s_conn_reset);
	rds_send_reset(conn);
	conn->c_flags = 0;

	/* Do not clear next_rx_seq here, else we cannot distinguish
	 * retransmitted packets from new packets, and will hand all
	 * of them to the application. That is not consistent with the
	 * reliability guarantees of RDS. */
}

/*
 * There is only ever one 'conn' for a given pair of addresses in the
 * system at a time.  They contain messages to be retransmitted and so
 * span the lifetime of the actual underlying transport connections.
 *
 * For now they are not garbage collected once they're created.  They
 * are torn down as the module is removed, if ever.
 */
static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp,
				       int is_outgoing)
{
	struct rds_connection *conn, *parent = NULL;
	struct hlist_head *head = rds_conn_bucket(laddr, faddr);
	struct rds_transport *loop_trans;
	unsigned long flags;
	int ret;

	rcu_read_lock();
	conn = rds_conn_lookup(head, laddr, faddr, trans);
	if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
	    !is_outgoing) {
		/* This is a looped back IB connection, and we're
		 * called by the code handling the incoming connect.
		 * We need a second connection object into which we
		 * can stick the other QP. */
		parent = conn;
		conn = parent->c_passive;
	}
	rcu_read_unlock();
	if (conn)
		goto out;

	conn = kmem_cache_zalloc(rds_conn_slab, gfp);
	if (!conn) {
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}

	INIT_HLIST_NODE(&conn->c_hash_node);
	conn->c_laddr = laddr;
	conn->c_faddr = faddr;
	spin_lock_init(&conn->c_lock);
	conn->c_next_tx_seq = 1;

	init_waitqueue_head(&conn->c_waitq);
	INIT_LIST_HEAD(&conn->c_send_queue);
	INIT_LIST_HEAD(&conn->c_retrans);

	ret = rds_cong_get_maps(conn);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	/*
	 * This is where a connection becomes loopback.  If *any* RDS sockets
	 * can bind to the destination address then we'd rather the messages
	 * flow through loopback than through either transport.
	 */
	loop_trans = rds_trans_get_preferred(faddr);
	if (loop_trans) {
		rds_trans_put(loop_trans);
		conn->c_loopback = 1;
		if (is_outgoing && trans->t_prefer_loopback) {
			/* "outgoing" connection - and the transport
			 * says it wants the connection handled by the
			 * loopback transport. This is what TCP does.
			 */
			trans = &rds_loop_transport;
		}
	}

	conn->c_trans = trans;

	ret = trans->conn_alloc(conn, gfp);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	atomic_set(&conn->c_state, RDS_CONN_DOWN);
	conn->c_reconnect_jiffies = 0;
	INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker);
	INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker);
	INIT_DELAYED_WORK(&conn->c_conn_w, rds_connect_worker);
	INIT_WORK(&conn->c_down_w, rds_shutdown_worker);
	mutex_init(&conn->c_cm_lock);
	conn->c_flags = 0;

	rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
	  conn, &laddr, &faddr,
	  trans->t_name ? trans->t_name : "[unknown]",
	  is_outgoing ? "(outgoing)" : "");

	/*
	 * Since we ran without holding the conn lock, someone could
	 * have created the same conn (either normal or passive) in the
	 * interim. We check while holding the lock. If we won, we complete
	 * init and return our conn. If we lost, we roll back and return the
	 * other one.
	 */
	spin_lock_irqsave(&rds_conn_lock, flags);
	if (parent) {
		/* Creating passive conn */
		if (parent->c_passive) {
			trans->conn_free(conn->c_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = parent->c_passive;
		} else {
			parent->c_passive = conn;
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	} else {
		/* Creating normal conn */
		struct rds_connection *found;

		found = rds_conn_lookup(head, laddr, faddr, trans);
		if (found) {
			trans->conn_free(conn->c_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = found;
		} else {
			hlist_add_head_rcu(&conn->c_hash_node, head);
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	}
	spin_unlock_irqrestore(&rds_conn_lock, flags);

out:
	return conn;
}

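/* Find or create the connection for this address pair and transport. */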
struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(laddr, faddr, trans, gfp, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_create);

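/* As rds_conn_create(), but for the active (outgoing/sending) side. */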
struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(laddr, faddr, trans, gfp, 1);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);

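/*
 * Take the connection down to RDS_CONN_DOWN and, if it is still in the
 * connection hash, queue a reconnect attempt.
 */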
void rds_conn_shutdown(struct rds_connection *conn)
{
	/* shut it down unless it's down already */
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
		/*
		 * Quiesce the connection mgmt handlers before we start tearing
		 * things down. We don't hold the mutex for the entire
		 * duration of the shutdown operation, else we may be
		 * deadlocking with the CM handler. Instead, the CM event
		 * handler is supposed to check for state DISCONNECTING.
		 */
		mutex_lock(&conn->c_cm_lock);
		if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING)
		 && !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
			rds_conn_error(conn, "shutdown called in state %d\n",
					atomic_read(&conn->c_state));
			mutex_unlock(&conn->c_cm_lock);
			return;
		}
		mutex_unlock(&conn->c_cm_lock);

		wait_event(conn->c_waitq,
			   !test_bit(RDS_IN_XMIT, &conn->c_flags));

		conn->c_trans->conn_shutdown(conn);
		rds_conn_reset(conn);

		if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
			/* This can happen - e.g. when we're in the middle of tearing
			 * down the connection, and someone unloads the rds module.
			 * Quite reproducible with loopback connections.
			 * Mostly harmless.
			 */
			rds_conn_error(conn,
				"%s: failed to transition to state DOWN, "
				"current state is %d\n",
				__func__,
				atomic_read(&conn->c_state));
			return;
		}
	}

	/* Then reconnect if it's still live.
	 * The passive side of an IB loopback connection is never added
	 * to the conn hash, so we never trigger a reconnect on this
	 * conn - the reconnect is always triggered by the active peer. */
	cancel_delayed_work_sync(&conn->c_conn_w);
	rcu_read_lock();
	if (!hlist_unhashed(&conn->c_hash_node)) {
		rcu_read_unlock();
		rds_queue_reconnect(conn);
	} else {
		rcu_read_unlock();
	}
}

/*
 * Stop and free a connection.
 *
 * This can only be used in very limited circumstances.  It assumes that once
 * the conn has been shutdown that no one else is referencing the connection.
 * We can only ensure this in the rmmod path in the current code.
 */
void rds_conn_destroy(struct rds_connection *conn)
{
	struct rds_message *rm, *rtmp;
	unsigned long flags;

	rdsdebug("freeing conn %p for %pI4 -> "
		 "%pI4\n", conn, &conn->c_laddr,
		 &conn->c_faddr);

	/* Ensure conn will not be scheduled for reconnect */
	spin_lock_irq(&rds_conn_lock);
	hlist_del_init_rcu(&conn->c_hash_node);
	spin_unlock_irq(&rds_conn_lock);
	synchronize_rcu();

	/* shut the connection down */
	rds_conn_drop(conn);
	flush_work(&conn->c_down_w);

	/* make sure lingering queued work won't try to ref the conn */
	cancel_delayed_work_sync(&conn->c_send_w);
	cancel_delayed_work_sync(&conn->c_recv_w);

	/* tear down queued messages */
	list_for_each_entry_safe(rm, rtmp,
				 &conn->c_send_queue,
				 m_conn_item) {
		list_del_init(&rm->m_conn_item);
		BUG_ON(!list_empty(&rm->m_sock_item));
		rds_message_put(rm);
	}
	if (conn->c_xmit_rm)
		rds_message_put(conn->c_xmit_rm);

	conn->c_trans->conn_free(conn->c_transport_data);

	/*
	 * The congestion maps aren't freed up here.  They're
	 * freed by rds_cong_exit() after all the connections
	 * have been freed.
	 */
	rds_cong_remove_conn(conn);

	BUG_ON(!list_empty(&conn->c_retrans));
	kmem_cache_free(rds_conn_slab, conn);

	spin_lock_irqsave(&rds_conn_lock, flags);
	rds_conn_count--;
	spin_unlock_irqrestore(&rds_conn_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);

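/*
 * Copy per-message info for either the send queue or the retransmit queue
 * of every connection into the rds-info iterator.
 */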
static void rds_conn_message_info(struct socket *sock, unsigned int len,
				  struct rds_info_iterator *iter,
				  struct rds_info_lengths *lens,
				  int want_send)
{
	struct hlist_head *head;
	struct list_head *list;
	struct rds_connection *conn;
	struct rds_message *rm;
	unsigned int total = 0;
	unsigned long flags;
	size_t i;

	len /= sizeof(struct rds_info_message);

	rcu_read_lock();

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			if (want_send)
				list = &conn->c_send_queue;
			else
				list = &conn->c_retrans;

			spin_lock_irqsave(&conn->c_lock, flags);

			/* XXX too lazy to maintain counts.. */
			list_for_each_entry(rm, list, m_conn_item) {
				total++;
				if (total <= len)
					rds_inc_info_copy(&rm->m_inc, iter,
							  conn->c_laddr,
							  conn->c_faddr, 0);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);
		}
	}
	rcu_read_unlock();

	lens->nr = total;
	lens->each = sizeof(struct rds_info_message);
}

static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
				       struct rds_info_iterator *iter,
				       struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 1);
}

static void rds_conn_message_info_retrans(struct socket *sock,
					  unsigned int len,
					  struct rds_info_iterator *iter,
					  struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 0);
}

void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens,
			  int (*visitor)(struct rds_connection *, void *),
			  size_t item_len)
{
	uint64_t buffer[(item_len + 7) / 8];
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {

			/* XXX no c_lock usage.. */
			if (!visitor(conn, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer. */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);

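/* Fill one struct rds_info_connection entry; used with rds_for_each_conn_info(). */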
static int rds_conn_info_visitor(struct rds_connection *conn,
				  void *buffer)
{
	struct rds_info_connection *cinfo = buffer;

	cinfo->next_tx_seq = conn->c_next_tx_seq;
	cinfo->next_rx_seq = conn->c_next_rx_seq;
	cinfo->laddr = conn->c_laddr;
	cinfo->faddr = conn->c_faddr;
	strncpy(cinfo->transport, conn->c_trans->t_name,
		sizeof(cinfo->transport));
	cinfo->flags = 0;

	rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &conn->c_flags),
			  SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&conn->c_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&conn->c_state) == RDS_CONN_UP,
			  CONNECTED);
	return 1;
}

static void rds_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens)
{
	rds_for_each_conn_info(sock, len, iter, lens,
				rds_conn_info_visitor,
				sizeof(struct rds_info_connection));
}

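/* Set up the connection slab cache and register the rds-info handlers. */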
int rds_conn_init(void)
{
	rds_conn_slab = kmem_cache_create("rds_connection",
					  sizeof(struct rds_connection),
					  0, 0, NULL);
	if (!rds_conn_slab)
		return -ENOMEM;

	rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_register_func(RDS_INFO_SEND_MESSAGES,
			       rds_conn_message_info_send);
	rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
			       rds_conn_message_info_retrans);

	return 0;
}

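/* Teardown counterpart of rds_conn_init(); all connections should be gone. */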
void rds_conn_exit(void)
{
	rds_loop_exit();

	WARN_ON(!hlist_empty(rds_conn_hash));

	kmem_cache_destroy(rds_conn_slab);

	rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_deregister_func(RDS_INFO_SEND_MESSAGES,
				 rds_conn_message_info_send);
	rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
				 rds_conn_message_info_retrans);
}

/*
 * Force a disconnect
 */
void rds_conn_drop(struct rds_connection *conn)
{
	atomic_set(&conn->c_state, RDS_CONN_ERROR);
	queue_work(rds_wq, &conn->c_down_w);
}
EXPORT_SYMBOL_GPL(rds_conn_drop);

/*
 * If the connection is down, trigger a connect. We may have scheduled a
 * delayed reconnect however - in this case we should not interfere.
 */
void rds_conn_connect_if_down(struct rds_connection *conn)
{
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);

/*
 * An error occurred on the connection
 */
void
__rds_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);

	rds_conn_drop(conn);
}