/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/inet_hashtables.h>

#include "rds.h"
#include "loop.h"

#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)
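
/*
 * Worked example: with RDS_CONNECTION_HASH_BITS = 12 the table below has
 * 1 << 12 = 4096 buckets and RDS_CONNECTION_HASH_MASK is 0xfff, so
 * rds_conn_bucket() reduces the 32-bit ehash value with "hash & 0xfff".
 */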

/* converting this to RCU is a chore for another day.. */
static DEFINE_SPINLOCK(rds_conn_lock);
static unsigned long rds_conn_count;
static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
static struct kmem_cache *rds_conn_slab;

static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
{
	static u32 rds_hash_secret __read_mostly;

	unsigned long hash;

	net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret));

	/* Pass NULL, don't need struct net for hash */
	hash = __inet_ehashfn(be32_to_cpu(laddr), 0,
			      be32_to_cpu(faddr), 0,
			      rds_hash_secret);
	return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}

#define rds_conn_info_set(var, test, suffix) do {		\
	if (test)						\
		var |= RDS_INFO_CONNECTION_FLAG_##suffix;	\
} while (0)
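
/*
 * For example, the call in rds_conn_info_visitor() below,
 *
 *	rds_conn_info_set(cinfo->flags,
 *			  test_bit(RDS_IN_XMIT, &conn->c_flags), SENDING);
 *
 * expands to
 *
 *	if (test_bit(RDS_IN_XMIT, &conn->c_flags))
 *		cinfo->flags |= RDS_INFO_CONNECTION_FLAG_SENDING;
 */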

/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
					      __be32 laddr, __be32 faddr,
					      struct rds_transport *trans)
{
	struct rds_connection *conn, *ret = NULL;

	hlist_for_each_entry_rcu(conn, head, c_hash_node) {
		if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
		    conn->c_trans == trans) {
			ret = conn;
			break;
		}
	}
	rdsdebug("returning conn %p for %pI4 -> %pI4\n", ret,
		 &laddr, &faddr);
	return ret;
}

/*
 * This is called by transports as they're bringing down a connection.
 * It clears partial message state so that the transport can start sending
 * and receiving over this connection again in the future. It is up to
 * the transport to have serialized this call with its send and recv.
 */
static void rds_conn_reset(struct rds_connection *conn)
{
	rdsdebug("connection %pI4 to %pI4 reset\n",
		 &conn->c_laddr, &conn->c_faddr);

	rds_stats_inc(s_conn_reset);
	rds_send_reset(conn);
	conn->c_flags = 0;

	/* Do not clear next_rx_seq here, else we cannot distinguish
	 * retransmitted packets from new packets, and will hand all
	 * of them to the application. That is not consistent with the
	 * reliability guarantees of RDS. */
}

/*
 * There is only ever one 'conn' for a given pair of addresses in the
 * system at a time. They contain messages to be retransmitted and so
 * span the lifetime of the actual underlying transport connections.
 *
 * For now they are not garbage collected once they're created. They
 * are torn down as the module is removed, if ever.
 */
static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
						struct rds_transport *trans,
						gfp_t gfp, int is_outgoing)
{
	struct rds_connection *conn, *parent = NULL;
	struct hlist_head *head = rds_conn_bucket(laddr, faddr);
	struct rds_transport *loop_trans;
	unsigned long flags;
	int ret;

	rcu_read_lock();
	conn = rds_conn_lookup(head, laddr, faddr, trans);
	if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
	    !is_outgoing) {
		/* This is a looped back IB connection, and we're
		 * called by the code handling the incoming connect.
		 * We need a second connection object into which we
		 * can stick the other QP. */
		parent = conn;
		conn = parent->c_passive;
	}
	rcu_read_unlock();
	if (conn)
		goto out;

	conn = kmem_cache_zalloc(rds_conn_slab, gfp);
	if (!conn) {
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}

	INIT_HLIST_NODE(&conn->c_hash_node);
	conn->c_laddr = laddr;
	conn->c_faddr = faddr;
	spin_lock_init(&conn->c_lock);
	conn->c_next_tx_seq = 1;

	init_waitqueue_head(&conn->c_waitq);
	INIT_LIST_HEAD(&conn->c_send_queue);
	INIT_LIST_HEAD(&conn->c_retrans);

	ret = rds_cong_get_maps(conn);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	/*
	 * This is where a connection becomes loopback. If *any* RDS sockets
	 * can bind to the destination address then we'd rather the messages
	 * flow through loopback than through either transport.
	 */
	loop_trans = rds_trans_get_preferred(faddr);
	if (loop_trans) {
		rds_trans_put(loop_trans);
		conn->c_loopback = 1;
		if (is_outgoing && trans->t_prefer_loopback) {
			/* "outgoing" connection - and the transport
			 * says it wants the connection handled by the
			 * loopback transport. This is what TCP does.
			 */
			trans = &rds_loop_transport;
		}
	}

	if (!trans) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(-ENODEV);
		goto out;
	}

	conn->c_trans = trans;

	ret = trans->conn_alloc(conn, gfp);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	atomic_set(&conn->c_state, RDS_CONN_DOWN);
	conn->c_reconnect_jiffies = 0;
	INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker);
	INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker);
	INIT_DELAYED_WORK(&conn->c_conn_w, rds_connect_worker);
	INIT_WORK(&conn->c_down_w, rds_shutdown_worker);
	mutex_init(&conn->c_cm_lock);
	conn->c_flags = 0;

	rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
		 conn, &laddr, &faddr,
		 trans->t_name ? trans->t_name : "[unknown]",
		 is_outgoing ? "(outgoing)" : "");

	/*
	 * Since we ran without holding the conn lock, someone could
	 * have created the same conn (either normal or passive) in the
	 * interim. We check while holding the lock. If we won, we complete
	 * init and return our conn. If we lost, we roll back and return the
	 * other one.
	 */
	spin_lock_irqsave(&rds_conn_lock, flags);
	if (parent) {
		/* Creating passive conn */
		if (parent->c_passive) {
			trans->conn_free(conn->c_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = parent->c_passive;
		} else {
			parent->c_passive = conn;
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	} else {
		/* Creating normal conn */
		struct rds_connection *found;

		found = rds_conn_lookup(head, laddr, faddr, trans);
		if (found) {
			trans->conn_free(conn->c_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = found;
		} else {
			hlist_add_head_rcu(&conn->c_hash_node, head);
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	}
	spin_unlock_irqrestore(&rds_conn_lock, flags);

out:
	return conn;
}
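
/*
 * A sketch of typical caller usage (illustrative only; the bound address,
 * destination, transport pointer and GFP flags all depend on the call
 * site):
 *
 *	conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
 *					trans, GFP_KERNEL);
 *	if (IS_ERR(conn))
 *		return PTR_ERR(conn);
 *
 * On the lookup-hit and lost-the-race paths the existing conn is
 * returned, so callers must not assume they received a fresh object.
 */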

struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(laddr, faddr, trans, gfp, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_create);

struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
						struct rds_transport *trans,
						gfp_t gfp)
{
	return __rds_conn_create(laddr, faddr, trans, gfp, 1);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);

void rds_conn_shutdown(struct rds_connection *conn)
{
	/* shut it down unless it's down already */
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
		/*
		 * Quiesce the connection mgmt handlers before we start tearing
		 * things down. We don't hold the mutex for the entire
		 * duration of the shutdown operation, else we may be
		 * deadlocking with the CM handler. Instead, the CM event
		 * handler is supposed to check for state DISCONNECTING.
		 */
		mutex_lock(&conn->c_cm_lock);
		if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING) &&
		    !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
			rds_conn_error(conn, "shutdown called in state %d\n",
				       atomic_read(&conn->c_state));
			mutex_unlock(&conn->c_cm_lock);
			return;
		}
		mutex_unlock(&conn->c_cm_lock);

		wait_event(conn->c_waitq,
			   !test_bit(RDS_IN_XMIT, &conn->c_flags));

		conn->c_trans->conn_shutdown(conn);
		rds_conn_reset(conn);

		if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
			/* This can happen - eg when we're in the middle of tearing
			 * down the connection, and someone unloads the rds module.
			 * Quite reproducible with loopback connections.
			 * Mostly harmless.
			 */
			rds_conn_error(conn,
				       "%s: failed to transition to state DOWN, current state is %d\n",
				       __func__,
				       atomic_read(&conn->c_state));
			return;
		}
	}

	/* Then reconnect if it's still live.
	 * The passive side of an IB loopback connection is never added
	 * to the conn hash, so we never trigger a reconnect on this
	 * conn - the reconnect is always triggered by the active peer. */
	cancel_delayed_work_sync(&conn->c_conn_w);
	rcu_read_lock();
	if (!hlist_unhashed(&conn->c_hash_node)) {
		rcu_read_unlock();
		rds_queue_reconnect(conn);
	} else {
		rcu_read_unlock();
	}
}
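
/*
 * A rough summary of the conn state transitions driven by this file and
 * the workers it queues (not an exhaustive diagram):
 *
 *	DOWN -> CONNECTING -> UP ----------------> DISCONNECTING -> DOWN
 *	                       \-> ERROR (drop) -/
 *
 * rds_conn_drop() forces a conn to ERROR and queues the shutdown worker;
 * rds_conn_shutdown() moves UP or ERROR through DISCONNECTING back to
 * DOWN, and may then queue a reconnect.
 */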

/*
 * Stop and free a connection.
 *
 * This can only be used in very limited circumstances. It assumes that
 * once the conn has been shut down, no one else is referencing the
 * connection. We can only ensure this in the rmmod path in the current
 * code.
 */
void rds_conn_destroy(struct rds_connection *conn)
{
	struct rds_message *rm, *rtmp;
	unsigned long flags;

	rdsdebug("freeing conn %p for %pI4 -> %pI4\n",
		 conn, &conn->c_laddr, &conn->c_faddr);

	/* Ensure conn will not be scheduled for reconnect */
	spin_lock_irq(&rds_conn_lock);
	hlist_del_init_rcu(&conn->c_hash_node);
	spin_unlock_irq(&rds_conn_lock);
	synchronize_rcu();

	/* shut the connection down */
	rds_conn_drop(conn);
	flush_work(&conn->c_down_w);

	/* make sure lingering queued work won't try to ref the conn */
	cancel_delayed_work_sync(&conn->c_send_w);
	cancel_delayed_work_sync(&conn->c_recv_w);

	/* tear down queued messages */
	list_for_each_entry_safe(rm, rtmp,
				 &conn->c_send_queue,
				 m_conn_item) {
		list_del_init(&rm->m_conn_item);
		BUG_ON(!list_empty(&rm->m_sock_item));
		rds_message_put(rm);
	}
	if (conn->c_xmit_rm)
		rds_message_put(conn->c_xmit_rm);

	conn->c_trans->conn_free(conn->c_transport_data);

	/*
	 * The congestion maps aren't freed up here. They're
	 * freed by rds_cong_exit() after all the connections
	 * have been freed.
	 */
	rds_cong_remove_conn(conn);

	BUG_ON(!list_empty(&conn->c_retrans));
	kmem_cache_free(rds_conn_slab, conn);

	spin_lock_irqsave(&rds_conn_lock, flags);
	rds_conn_count--;
	spin_unlock_irqrestore(&rds_conn_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);

static void rds_conn_message_info(struct socket *sock, unsigned int len,
				  struct rds_info_iterator *iter,
				  struct rds_info_lengths *lens,
				  int want_send)
{
	struct hlist_head *head;
	struct list_head *list;
	struct rds_connection *conn;
	struct rds_message *rm;
	unsigned int total = 0;
	unsigned long flags;
	size_t i;

	len /= sizeof(struct rds_info_message);

	rcu_read_lock();

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			if (want_send)
				list = &conn->c_send_queue;
			else
				list = &conn->c_retrans;

			spin_lock_irqsave(&conn->c_lock, flags);

			/* XXX too lazy to maintain counts.. */
			list_for_each_entry(rm, list, m_conn_item) {
				total++;
				if (total <= len)
					rds_inc_info_copy(&rm->m_inc, iter,
							  conn->c_laddr,
							  conn->c_faddr, 0);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);
		}
	}
	rcu_read_unlock();

	lens->nr = total;
	lens->each = sizeof(struct rds_info_message);
}

static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
				       struct rds_info_iterator *iter,
				       struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 1);
}

static void rds_conn_message_info_retrans(struct socket *sock,
					  unsigned int len,
					  struct rds_info_iterator *iter,
					  struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 0);
}

void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    size_t item_len)
{
	uint64_t buffer[(item_len + 7) / 8];
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {

			/* XXX no c_lock usage.. */
			if (!visitor(conn, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer. */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);

static int rds_conn_info_visitor(struct rds_connection *conn,
				 void *buffer)
{
	struct rds_info_connection *cinfo = buffer;

	cinfo->next_tx_seq = conn->c_next_tx_seq;
	cinfo->next_rx_seq = conn->c_next_rx_seq;
	cinfo->laddr = conn->c_laddr;
	cinfo->faddr = conn->c_faddr;
	strncpy(cinfo->transport, conn->c_trans->t_name,
		sizeof(cinfo->transport));
	cinfo->flags = 0;

	rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &conn->c_flags),
			  SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&conn->c_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&conn->c_state) == RDS_CONN_UP,
			  CONNECTED);
	return 1;
}

static void rds_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens)
{
	rds_for_each_conn_info(sock, len, iter, lens,
			       rds_conn_info_visitor,
			       sizeof(struct rds_info_connection));
}

int rds_conn_init(void)
{
	rds_conn_slab = kmem_cache_create("rds_connection",
					  sizeof(struct rds_connection),
					  0, 0, NULL);
	if (!rds_conn_slab)
		return -ENOMEM;

	rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_register_func(RDS_INFO_SEND_MESSAGES,
			       rds_conn_message_info_send);
	rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
			       rds_conn_message_info_retrans);

	return 0;
}

void rds_conn_exit(void)
{
	rds_loop_exit();

	WARN_ON(!hlist_empty(rds_conn_hash));

	kmem_cache_destroy(rds_conn_slab);

	rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_deregister_func(RDS_INFO_SEND_MESSAGES,
				 rds_conn_message_info_send);
	rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
				 rds_conn_message_info_retrans);
}

/*
 * Force a disconnect
 */
void rds_conn_drop(struct rds_connection *conn)
{
	atomic_set(&conn->c_state, RDS_CONN_ERROR);
	queue_work(rds_wq, &conn->c_down_w);
}
EXPORT_SYMBOL_GPL(rds_conn_drop);

/*
 * If the connection is down, trigger a connect. We may have scheduled a
 * delayed reconnect however - in this case we should not interfere.
 */
void rds_conn_connect_if_down(struct rds_connection *conn)
{
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);
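
/*
 * Together with rds_conn_drop() above, this completes the usual recovery
 * loop: an error drops the conn to ERROR, the shutdown worker brings it
 * back to DOWN, and a later caller (the send path, for instance) uses
 * rds_conn_connect_if_down() to kick off a fresh connect attempt.
 */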

/*
 * An error occurred on the connection
 */
void
__rds_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);

	rds_conn_drop(conn);
}