// SPDX-License-Identifier: GPL-2.0-or-later
/* Client connection-specific management code.
 *
 * Copyright (C) 2016, 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * Client connections need to be cached for a little while after they've made a
 * call so as to handle retransmitted DATA packets in case the server didn't
 * receive the final ACK or terminating ABORT we sent it.
 *
 * There is one flag of relevance to the cache:
 *
 *  (1) DONT_REUSE - The connection should be discarded as soon as possible and
 *      should not be reused.  This is set when an exclusive connection is used
 *      or a call ID counter overflows.
 *
 * The caching state may only be changed if the cache lock is held.
 *
 * There are two idle client connection expiry durations.  If the total number
 * of connections is below the reap threshold, we use the normal duration; if
 * it's above, we use the fast duration.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>

#include "ar-internal.h"

__read_mostly unsigned int rxrpc_reap_client_connections = 900;
__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
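/* Note: with the defaults above, an idle client connection may linger for two
 * minutes; once the number of client connections exceeds the reap threshold
 * of 900, idle connections are instead reaped after just two seconds.
 */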

/*
 * We use machine-unique IDs for our client connections.
 */
DEFINE_IDR(rxrpc_client_conn_ids);
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);

/*
 * Get a connection ID and epoch for a client connection from the global pool.
 * The connection struct pointer is then recorded in the idr radix tree.  The
 * epoch doesn't change until the client is rebooted (or, at least, unless the
 * module is unloaded).
 */
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
					  gfp_t gfp)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	int id;

	_enter("");

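	/* Preallocate IDR memory with the caller's gfp flags so that the
	 * allocation done under the spinlock below can safely use GFP_NOWAIT.
	 */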
	idr_preload(gfp);
	spin_lock(&rxrpc_conn_id_lock);

	id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,
			      1, 0x40000000, GFP_NOWAIT);
	if (id < 0)
		goto error;

	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();

	conn->proto.epoch = rxnet->epoch;
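	/* The low RXRPC_CIDSHIFT bits of a CID carry the channel number, so
	 * the allocated ID is shifted up to form the connection ID.  IDs are
	 * allocated cyclically below 0x40000000, so the shifted result still
	 * fits in the 32-bit CID field.
	 */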
	conn->proto.cid = id << RXRPC_CIDSHIFT;
	set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
	_leave(" [CID %x]", conn->proto.cid);
	return 0;

error:
	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();
	_leave(" = %d", id);
	return id;
}

/*
 * Release a connection ID for a client connection from the global pool.
 */
static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
{
	if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
		spin_lock(&rxrpc_conn_id_lock);
		idr_remove(&rxrpc_client_conn_ids,
			   conn->proto.cid >> RXRPC_CIDSHIFT);
		spin_unlock(&rxrpc_conn_id_lock);
	}
}

/*
 * Destroy the client connection ID tree.
 */
void rxrpc_destroy_client_conn_ids(void)
{
	struct rxrpc_connection *conn;
	int id;

	if (!idr_is_empty(&rxrpc_client_conn_ids)) {
		idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
			       conn, atomic_read(&conn->usage));
		}
		BUG();
	}

	idr_destroy(&rxrpc_client_conn_ids);
}

/*
 * Allocate a connection bundle.
 */
static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_conn_parameters *cp,
					       gfp_t gfp)
{
	struct rxrpc_bundle *bundle;

	bundle = kzalloc(sizeof(*bundle), gfp);
	if (bundle) {
		bundle->params = *cp;
		rxrpc_get_peer(bundle->params.peer);
		atomic_set(&bundle->usage, 1);
		spin_lock_init(&bundle->channel_lock);
		INIT_LIST_HEAD(&bundle->waiting_calls);
	}
	return bundle;
}

struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle)
{
	atomic_inc(&bundle->usage);
	return bundle;
}

static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
{
	rxrpc_put_peer(bundle->params.peer);
	kfree(bundle);
}

void rxrpc_put_bundle(struct rxrpc_bundle *bundle)
{
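	/* Take local copies of the ID and the post-decrement count, as the
	 * final put frees the bundle and it mustn't be touched afterwards.
	 */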
	unsigned int d = bundle->debug_id;
	unsigned int u = atomic_dec_return(&bundle->usage);

	_debug("PUT B=%x %u", d, u);
	if (u == 0)
		rxrpc_free_bundle(bundle);
}

/*
 * Allocate a client connection.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet = bundle->params.local->rxnet;
	int ret;

	_enter("");

	conn = rxrpc_alloc_connection(gfp);
	if (!conn) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&conn->usage, 1);
	conn->bundle		= bundle;
	conn->params		= bundle->params;
	conn->out_clientflag	= RXRPC_CLIENT_INITIATED;
	conn->state		= RXRPC_CONN_CLIENT;
	conn->service_id	= conn->params.service_id;

	ret = rxrpc_get_client_connection_id(conn, gfp);
	if (ret < 0)
		goto error_0;

	ret = rxrpc_init_client_conn_security(conn);
	if (ret < 0)
		goto error_1;

	ret = conn->security->prime_packet_security(conn);
	if (ret < 0)
		goto error_2;

	atomic_inc(&rxnet->nr_conns);
	write_lock(&rxnet->conn_lock);
	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
	write_unlock(&rxnet->conn_lock);

	rxrpc_get_bundle(bundle);
	rxrpc_get_peer(conn->params.peer);
	rxrpc_get_local(conn->params.local);
	key_get(conn->params.key);

	trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_client,
			 atomic_read(&conn->usage),
			 __builtin_return_address(0));

	atomic_inc(&rxnet->nr_client_conns);
	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
	_leave(" = %p", conn);
	return conn;

error_2:
	conn->security->clear(conn);
error_1:
	rxrpc_put_client_connection_id(conn);
error_0:
	kfree(conn);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Determine if a connection may be reused.
 */
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet;
	int id_cursor, id, distance, limit;

	if (!conn)
		goto dont_reuse;

	rxnet = conn->params.local->rxnet;
	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
		goto dont_reuse;

	if (conn->state != RXRPC_CONN_CLIENT ||
	    conn->proto.epoch != rxnet->epoch)
		goto mark_dont_reuse;

	/* The IDR tree gets very expensive on memory if the connection IDs are
	 * widely scattered throughout the number space, so we shall want to
	 * kill off connections that, say, have an ID more than about four
	 * times the maximum number of client conns away from the current
	 * allocation point to try and keep the IDs concentrated.
	 */
	id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
	id = conn->proto.cid >> RXRPC_CIDSHIFT;
	distance = id - id_cursor;
	if (distance < 0)
		distance = -distance;
	limit = max_t(unsigned long, atomic_read(&rxnet->nr_conns) * 4, 1024);
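	/* For example, with 200 connections the 1024-ID floor applies, but
	 * with 1000 connections the reuse window widens to 4000 IDs either
	 * side of the allocation cursor.
	 */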
	if (distance > limit)
		goto mark_dont_reuse;

	return true;

mark_dont_reuse:
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
dont_reuse:
	return false;
}

/*
 * Look up the conn bundle that matches the connection parameters, adding it if
 * it doesn't yet exist.
 */
static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *cp,
						 gfp_t gfp)
{
	static atomic_t rxrpc_bundle_id;
	struct rxrpc_bundle *bundle, *candidate;
	struct rxrpc_local *local = cp->local;
	struct rb_node *p, **pp, *parent;
	long diff;

	_enter("{%px,%x,%u,%u}",
	       cp->peer, key_serial(cp->key), cp->security_level, cp->upgrade);

	if (cp->exclusive)
		return rxrpc_alloc_bundle(cp, gfp);

	/* First, see if the bundle is already there. */
	_debug("search 1");
	spin_lock(&local->client_bundles_lock);
	p = local->client_bundles.rb_node;
	while (p) {
		bundle = rb_entry(p, struct rxrpc_bundle, local_node);

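		/* Order bundles by peer pointer, then key pointer, then
		 * security level and upgrade flag; the pointer subtraction
		 * only needs to give a stable ordering for the rbtree.
		 */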
#define cmp(X) ((long)bundle->params.X - (long)cp->X)
		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level) ?:
			cmp(upgrade));
#undef cmp
		if (diff < 0)
			p = p->rb_left;
		else if (diff > 0)
			p = p->rb_right;
		else
			goto found_bundle;
	}
	spin_unlock(&local->client_bundles_lock);
	_debug("not found");

	/* It wasn't.  We need to add one. */
	candidate = rxrpc_alloc_bundle(cp, gfp);
	if (!candidate)
		return NULL;

	_debug("search 2");
	spin_lock(&local->client_bundles_lock);
	pp = &local->client_bundles.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		bundle = rb_entry(parent, struct rxrpc_bundle, local_node);

#define cmp(X) ((long)bundle->params.X - (long)cp->X)
		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level) ?:
			cmp(upgrade));
#undef cmp
		if (diff < 0)
			pp = &(*pp)->rb_left;
		else if (diff > 0)
			pp = &(*pp)->rb_right;
		else
			goto found_bundle_free;
	}

	_debug("new bundle");
	candidate->debug_id = atomic_inc_return(&rxrpc_bundle_id);
	rb_link_node(&candidate->local_node, parent, pp);
	rb_insert_color(&candidate->local_node, &local->client_bundles);
	rxrpc_get_bundle(candidate);
	spin_unlock(&local->client_bundles_lock);
	_leave(" = %u [new]", candidate->debug_id);
	return candidate;

found_bundle_free:
	rxrpc_free_bundle(candidate);
found_bundle:
	rxrpc_get_bundle(bundle);
	spin_unlock(&local->client_bundles_lock);
	_leave(" = %u [found]", bundle->debug_id);
	return bundle;
}

/*
 * Create or find a client bundle to use for a call.
 *
 * If we return with a connection, the call will be on its waiting list.  It's
 * left to the caller to assign a channel and wake up the call.
 */
static struct rxrpc_bundle *rxrpc_prep_call(struct rxrpc_sock *rx,
					    struct rxrpc_call *call,
					    struct rxrpc_conn_parameters *cp,
					    struct sockaddr_rxrpc *srx,
					    gfp_t gfp)
{
	struct rxrpc_bundle *bundle;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp);
	if (!cp->peer)
		goto error;

	call->cong_cwnd = cp->peer->cong_cwnd;
	if (call->cong_cwnd >= call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
	else
		call->cong_mode = RXRPC_CALL_SLOW_START;
	if (cp->upgrade)
		__set_bit(RXRPC_CALL_UPGRADE, &call->flags);

	/* Find the client connection bundle. */
	bundle = rxrpc_look_up_bundle(cp, gfp);
	if (!bundle)
		goto error;

	/* Get this call queued.  Someone else may activate it whilst we're
	 * lining up a new connection, but that's fine.
	 */
	spin_lock(&bundle->channel_lock);
	list_add_tail(&call->chan_wait_link, &bundle->waiting_calls);
	spin_unlock(&bundle->channel_lock);

	_leave(" = [B=%x]", bundle->debug_id);
	return bundle;

error:
	_leave(" = -ENOMEM");
	return ERR_PTR(-ENOMEM);
}

/*
 * Allocate a new connection and add it into a bundle.
 */
static void rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle, gfp_t gfp)
	__releases(bundle->channel_lock)
{
	struct rxrpc_connection *candidate = NULL, *old = NULL;
	bool conflict;
	int i;

	_enter("");

	conflict = bundle->alloc_conn;
	if (!conflict)
		bundle->alloc_conn = true;
	spin_unlock(&bundle->channel_lock);
	if (conflict) {
		_leave(" [conf]");
		return;
	}

	candidate = rxrpc_alloc_client_connection(bundle, gfp);

	spin_lock(&bundle->channel_lock);
	bundle->alloc_conn = false;

	if (IS_ERR(candidate)) {
		bundle->alloc_error = PTR_ERR(candidate);
		spin_unlock(&bundle->channel_lock);
		_leave(" [err %ld]", PTR_ERR(candidate));
		return;
	}

	bundle->alloc_error = 0;

	for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) {
		unsigned int shift = i * RXRPC_MAXCALLS;
		int j;

		old = bundle->conns[i];
		if (!rxrpc_may_reuse_conn(old)) {
			if (old)
				trace_rxrpc_client(old, -1, rxrpc_client_replace);
			candidate->bundle_shift = shift;
			bundle->conns[i] = candidate;
			for (j = 0; j < RXRPC_MAXCALLS; j++)
				set_bit(shift + j, &bundle->avail_chans);
			candidate = NULL;
			break;
		}

		old = NULL;
	}

	spin_unlock(&bundle->channel_lock);

	if (candidate) {
		_debug("discard C=%x", candidate->debug_id);
		trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate);
		rxrpc_put_connection(candidate);
	}

	rxrpc_put_connection(old);
	_leave("");
}

/*
 * Add a connection to a bundle if there are no usable connections or we have
 * connections waiting for extra capacity.
 */
static void rxrpc_maybe_add_conn(struct rxrpc_bundle *bundle, gfp_t gfp)
{
	struct rxrpc_call *call;
	int i, usable;

	_enter("");

	spin_lock(&bundle->channel_lock);

	/* See if there are any usable connections. */
	usable = 0;
	for (i = 0; i < ARRAY_SIZE(bundle->conns); i++)
		if (rxrpc_may_reuse_conn(bundle->conns[i]))
			usable++;

	if (!usable && !list_empty(&bundle->waiting_calls)) {
		call = list_first_entry(&bundle->waiting_calls,
					struct rxrpc_call, chan_wait_link);
		if (test_bit(RXRPC_CALL_UPGRADE, &call->flags))
			bundle->try_upgrade = true;
	}

	if (!usable)
		goto alloc_conn;

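	/* If there are no spare channels, calls are waiting and the bundle
	 * has a free slot, grow the bundle by another connection, unless
	 * we're still probing for a service upgrade.
	 */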
	if (!bundle->avail_chans &&
	    !bundle->try_upgrade &&
	    !list_empty(&bundle->waiting_calls) &&
	    usable < ARRAY_SIZE(bundle->conns))
		goto alloc_conn;

	spin_unlock(&bundle->channel_lock);
	_leave("");
	return;

alloc_conn:
	return rxrpc_add_conn_to_bundle(bundle, gfp);
}

/*
 * Assign a channel to the call at the front of the queue and wake the call up.
 * We don't increment the callNumber counter until this number has been exposed
 * to the world.
 */
static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
				       unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_bundle *bundle = conn->bundle;
	struct rxrpc_call *call = list_entry(bundle->waiting_calls.next,
					     struct rxrpc_call, chan_wait_link);
	u32 call_id = chan->call_counter + 1;

	_enter("C=%x,%u", conn->debug_id, channel);

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);

	/* Cancel the final ACK on the previous call if it hasn't been sent yet
	 * as the DATA packet will implicitly ACK it.
	 */
	clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
	clear_bit(conn->bundle_shift + channel, &bundle->avail_chans);

	rxrpc_see_call(call);
	list_del_init(&call->chan_wait_link);
	call->peer	= rxrpc_get_peer(conn->params.peer);
	call->conn	= rxrpc_get_connection(conn);
	call->cid	= conn->proto.cid | channel;
	call->call_id	= call_id;
	call->security	= conn->security;
	call->security_ix = conn->security_ix;
	call->service_id = conn->service_id;

	trace_rxrpc_connect_call(call);
	_net("CONNECT call %08x:%08x as call %d on conn %d",
	     call->cid, call->call_id, call->debug_id, conn->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
	write_unlock_bh(&call->state_lock);

	/* Paired with the read barrier in rxrpc_connect_call().  This orders
	 * cid and epoch in the connection wrt call_id without the need to
	 * take the channel_lock.
	 *
	 * We provisionally assign a callNumber at this point, but we don't
	 * confirm it until the call is about to be exposed.
	 *
	 * TODO: Pair with a barrier in the data_ready handler when that looks
	 * at the call ID through a connection channel.
	 */
	smp_wmb();

	chan->call_id		= call_id;
	chan->call_debug_id	= call->debug_id;
	rcu_assign_pointer(chan->call, call);
	wake_up(&call->waitq);
}

/*
 * Remove a connection from the idle list if it's on it.
 */
static void rxrpc_unidle_conn(struct rxrpc_bundle *bundle, struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet = bundle->params.local->rxnet;
	bool drop_ref;

	if (!list_empty(&conn->cache_link)) {
		drop_ref = false;
		spin_lock(&rxnet->client_conn_cache_lock);
		if (!list_empty(&conn->cache_link)) {
			list_del_init(&conn->cache_link);
			drop_ref = true;
		}
		spin_unlock(&rxnet->client_conn_cache_lock);
		if (drop_ref)
			rxrpc_put_connection(conn);
	}
}

/*
 * Assign channels and callNumbers to waiting calls with channel_lock
 * held by caller.
 */
static void rxrpc_activate_channels_locked(struct rxrpc_bundle *bundle)
{
	struct rxrpc_connection *conn;
	unsigned long avail, mask;
	unsigned int channel, slot;

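	/* Whilst we're probing for a service upgrade, only channel 0 of the
	 * first connection may be used, so restrict the availability mask to
	 * bit 0.
	 */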
	if (bundle->try_upgrade)
		mask = 1;
	else
		mask = ULONG_MAX;

	while (!list_empty(&bundle->waiting_calls)) {
		avail = bundle->avail_chans & mask;
		if (!avail)
			break;
		channel = __ffs(avail);
		clear_bit(channel, &bundle->avail_chans);

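		/* avail_chans covers every channel in the bundle, with each
		 * connection contributing RXRPC_MAXCALLS consecutive bits, so
		 * divide to find the conn slot and mask to find the channel
		 * within it.
		 */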
		slot = channel / RXRPC_MAXCALLS;
		conn = bundle->conns[slot];
		if (!conn)
			break;

		if (bundle->try_upgrade)
			set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
		rxrpc_unidle_conn(bundle, conn);

		channel &= (RXRPC_MAXCALLS - 1);
		conn->act_chans	|= 1 << channel;
		rxrpc_activate_one_channel(conn, channel);
	}
}

/*
 * Assign channels and callNumbers to waiting calls.
 */
static void rxrpc_activate_channels(struct rxrpc_bundle *bundle)
{
	_enter("B=%x", bundle->debug_id);

	trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans);

	if (!bundle->avail_chans)
		return;

	spin_lock(&bundle->channel_lock);
	rxrpc_activate_channels_locked(bundle);
	spin_unlock(&bundle->channel_lock);
	_leave("");
}

/*
 * Wait for a callNumber and a channel to be granted to a call.
 */
static int rxrpc_wait_for_channel(struct rxrpc_bundle *bundle,
				  struct rxrpc_call *call, gfp_t gfp)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret = 0;

	_enter("%d", call->debug_id);

	if (!gfpflags_allow_blocking(gfp)) {
		rxrpc_maybe_add_conn(bundle, gfp);
		rxrpc_activate_channels(bundle);
		ret = bundle->alloc_error ?: -EAGAIN;
		goto out;
	}

	add_wait_queue_exclusive(&call->waitq, &myself);
	for (;;) {
		rxrpc_maybe_add_conn(bundle, gfp);
		rxrpc_activate_channels(bundle);
		ret = bundle->alloc_error;
		if (ret < 0)
			break;

		switch (call->interruptibility) {
		case RXRPC_INTERRUPTIBLE:
		case RXRPC_PREINTERRUPTIBLE:
			set_current_state(TASK_INTERRUPTIBLE);
			break;
		case RXRPC_UNINTERRUPTIBLE:
		default:
			set_current_state(TASK_UNINTERRUPTIBLE);
			break;
		}
		if (READ_ONCE(call->state) != RXRPC_CALL_CLIENT_AWAIT_CONN)
			break;
		if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
		     call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
		    signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	remove_wait_queue(&call->waitq, &myself);
	__set_current_state(TASK_RUNNING);

out:
	_leave(" = %d", ret);
	return ret;
}

/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_sock *rx,
		       struct rxrpc_call *call,
		       struct rxrpc_conn_parameters *cp,
		       struct sockaddr_rxrpc *srx,
		       gfp_t gfp)
{
	struct rxrpc_bundle *bundle;
	struct rxrpc_net *rxnet = cp->local->rxnet;
	int ret = 0;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);

	bundle = rxrpc_prep_call(rx, call, cp, srx, gfp);
	if (IS_ERR(bundle)) {
		ret = PTR_ERR(bundle);
		goto out;
	}

	if (call->state == RXRPC_CALL_CLIENT_AWAIT_CONN) {
		ret = rxrpc_wait_for_channel(bundle, call, gfp);
		if (ret < 0)
			goto wait_failed;
	}

granted_channel:
	/* Paired with the write barrier in rxrpc_activate_one_channel(). */
	smp_rmb();

out_put_bundle:
	rxrpc_put_bundle(bundle);
out:
	_leave(" = %d", ret);
	return ret;

wait_failed:
	spin_lock(&bundle->channel_lock);
	list_del_init(&call->chan_wait_link);
	spin_unlock(&bundle->channel_lock);

	if (call->state != RXRPC_CALL_CLIENT_AWAIT_CONN) {
		ret = 0;
		goto granted_channel;
	}

	trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed);
	rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret);
	rxrpc_disconnect_client_call(bundle, call);
	goto out_put_bundle;
}

/*
 * Note that a call, and thus a connection, is about to be exposed to the
 * world.
 */
void rxrpc_expose_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];

	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		/* Mark the call ID as being used.  If the callNumber counter
		 * exceeds ~2 billion, we kill the connection after its
		 * outstanding calls have finished so that the counter doesn't
		 * wrap.
		 */
		chan->call_counter++;
		if (chan->call_counter >= INT_MAX)
			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
	}
}

/*
 * Set the reap timer.
 */
static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
{
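	/* timer_reduce() only ever brings the expiry forward, so a concurrent
	 * setter can't push the reap further into the future.
	 */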
	if (!rxnet->kill_all_client_conns) {
		unsigned long now = jiffies;
		unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;

		if (rxnet->live)
			timer_reduce(&rxnet->client_conn_reap_timer, reap_at);
	}
}

/*
 * Disconnect a client call.
 */
void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call *call)
{
	struct rxrpc_connection *conn;
	struct rxrpc_channel *chan = NULL;
	struct rxrpc_net *rxnet = bundle->params.local->rxnet;
	unsigned int channel;
	bool may_reuse;
	u32 cid;

	_enter("c=%x", call->debug_id);

	spin_lock(&bundle->channel_lock);
	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);

	/* Calls that have never actually been assigned a channel can simply be
	 * discarded.
	 */
	conn = call->conn;
	if (!conn) {
		_debug("call is waiting");
		ASSERTCMP(call->call_id, ==, 0);
		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
		list_del_init(&call->chan_wait_link);
		goto out;
	}

	cid = call->cid;
	channel = cid & RXRPC_CHANNELMASK;
	chan = &conn->channels[channel];
	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);

	if (rcu_access_pointer(chan->call) != call) {
		spin_unlock(&bundle->channel_lock);
		BUG();
	}

	may_reuse = rxrpc_may_reuse_conn(conn);

	/* If a client call was exposed to the world, we save the result for
	 * retransmission.
	 *
	 * We use a barrier here so that the call number and abort code can be
	 * read without needing to take a lock.
	 *
	 * TODO: Make the incoming packet handler check this and handle
	 * terminal retransmission without requiring access to the call.
	 */
	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		_debug("exposed %u,%u", call->call_id, call->abort_code);
		__rxrpc_disconnect_call(conn, call);

		if (test_and_clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
			trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
			bundle->try_upgrade = false;
			if (may_reuse)
				rxrpc_activate_channels_locked(bundle);
		}
	}

	/* See if we can pass the channel directly to another call. */
	if (may_reuse && !list_empty(&bundle->waiting_calls)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out;
	}

	/* Schedule the final ACK to be transmitted in a short while so that it
	 * can be skipped if we find a follow-on call.  The first DATA packet
	 * of the follow-on call will implicitly ACK this call.
	 */
	if (call->completion == RXRPC_CALL_SUCCEEDED &&
	    test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		unsigned long final_ack_at = jiffies + 2;

		WRITE_ONCE(chan->final_ack_at, final_ack_at);
		smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
		set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
		rxrpc_reduce_conn_timer(conn, final_ack_at);
	}

	/* Deactivate the channel. */
	rcu_assign_pointer(chan->call, NULL);
	set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans);
	conn->act_chans	&= ~(1 << channel);

	/* If no channels remain active, then put the connection on the idle
	 * list for a short while.  Give it a ref to stop it going away if it
	 * becomes unbundled.
	 */
	if (!conn->act_chans) {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
		conn->idle_timestamp = jiffies;

		rxrpc_get_connection(conn);
		spin_lock(&rxnet->client_conn_cache_lock);
		list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
		spin_unlock(&rxnet->client_conn_cache_lock);

		rxrpc_set_client_reap_timer(rxnet);
	}

out:
	spin_unlock(&bundle->channel_lock);
	_leave("");
	return;
}

/*
 * Remove a connection from a bundle.
 */
static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_bundle *bundle = conn->bundle;
	struct rxrpc_local *local = bundle->params.local;
	unsigned int bindex;
	bool need_drop = false, need_put = false;
	int i;

	_enter("C=%x", conn->debug_id);

	if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
		rxrpc_process_delayed_final_acks(conn, true);

	spin_lock(&bundle->channel_lock);
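	/* bundle_shift was set to slot * RXRPC_MAXCALLS when the connection
	 * was added to the bundle, so dividing back down recovers its slot.
	 */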
	bindex = conn->bundle_shift / RXRPC_MAXCALLS;
	if (bundle->conns[bindex] == conn) {
		_debug("clear slot %u", bindex);
		bundle->conns[bindex] = NULL;
		for (i = 0; i < RXRPC_MAXCALLS; i++)
			clear_bit(conn->bundle_shift + i, &bundle->avail_chans);
		need_drop = true;
	}
	spin_unlock(&bundle->channel_lock);

	/* If there are no more connections, remove the bundle */
	if (!bundle->avail_chans) {
		_debug("maybe unbundle");
		spin_lock(&local->client_bundles_lock);

		for (i = 0; i < ARRAY_SIZE(bundle->conns); i++)
			if (bundle->conns[i])
				break;
		if (i == ARRAY_SIZE(bundle->conns) && !bundle->params.exclusive) {
			_debug("erase bundle");
			rb_erase(&bundle->local_node, &local->client_bundles);
			need_put = true;
		}

		spin_unlock(&local->client_bundles_lock);
		if (need_put)
			rxrpc_put_bundle(bundle);
	}

	if (need_drop)
		rxrpc_put_connection(conn);
	_leave("");
}

/*
 * Clean up a dead client connection.
 */
static void rxrpc_kill_client_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_local *local = conn->params.local;
	struct rxrpc_net *rxnet = local->rxnet;

	_enter("C=%x", conn->debug_id);

	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
	atomic_dec(&rxnet->nr_client_conns);

	rxrpc_put_client_connection_id(conn);
	rxrpc_kill_connection(conn);
}

/*
 * Release a reference to a client connection, cleaning it up when the last
 * reference is dropped.
 */
void rxrpc_put_client_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	unsigned int debug_id = conn->debug_id;
	int n;

	n = atomic_dec_return(&conn->usage);
	trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, n, here);
	if (n <= 0) {
		ASSERTCMP(n, >=, 0);
		rxrpc_kill_client_conn(conn);
	}
}

/*
 * Discard expired client connections from the idle list.  Each conn in the
 * idle list has been exposed and holds an extra ref because of that.
 *
 * This may be called from conn setup or from a work item so cannot be
 * considered non-reentrant.
 */
void rxrpc_discard_expired_client_conns(struct work_struct *work)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, client_conn_reaper);
	unsigned long expiry, conn_expires_at, now;
	unsigned int nr_conns;

	_enter("");

	if (list_empty(&rxnet->idle_client_conns)) {
		_leave(" [empty]");
		return;
	}

	/* Don't double up on the discarding */
	if (!spin_trylock(&rxnet->client_conn_discard_lock)) {
		_leave(" [already]");
		return;
	}

	/* We keep an estimate of what the number of conns ought to be after
	 * we've discarded some so that we don't overdo the discarding.
	 */
	nr_conns = atomic_read(&rxnet->nr_client_conns);

next:
	spin_lock(&rxnet->client_conn_cache_lock);

	if (list_empty(&rxnet->idle_client_conns))
		goto out;

	conn = list_entry(rxnet->idle_client_conns.next,
			  struct rxrpc_connection, cache_link);

	if (!rxnet->kill_all_client_conns) {
		/* If the number of connections is over the reap limit, we
		 * expedite discard by reducing the expiry timeout.  We must,
		 * however, have at least a short grace period to be able to do
		 * final-ACK or ABORT retransmission.
		 */
		expiry = rxrpc_conn_idle_client_expiry;
		if (nr_conns > rxrpc_reap_client_connections)
			expiry = rxrpc_conn_idle_client_fast_expiry;
		if (conn->params.local->service_closed)
			expiry = rxrpc_closed_conn_expiry * HZ;

		conn_expires_at = conn->idle_timestamp + expiry;

		now = READ_ONCE(jiffies);
		if (time_after(conn_expires_at, now))
			goto not_yet_expired;
	}

	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
	list_del_init(&conn->cache_link);

	spin_unlock(&rxnet->client_conn_cache_lock);

	rxrpc_unbundle_conn(conn);
	rxrpc_put_connection(conn); /* Drop the ->cache_link ref */

	nr_conns--;
	goto next;

not_yet_expired:
	/* The connection at the front of the queue hasn't yet expired, so
	 * schedule the work item for that point if we discarded something.
	 *
	 * We don't worry if the work item is already scheduled - it can look
	 * after rescheduling itself at a later time.  We could cancel it, but
	 * then things get messier.
	 */
	_debug("not yet");
	if (!rxnet->kill_all_client_conns)
		timer_reduce(&rxnet->client_conn_reap_timer, conn_expires_at);

out:
	spin_unlock(&rxnet->client_conn_cache_lock);
	spin_unlock(&rxnet->client_conn_discard_lock);
	_leave("");
}

/*
 * Preemptively destroy all the client connection records rather than waiting
 * for them to time out.
 */
void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
{
	_enter("");

	spin_lock(&rxnet->client_conn_cache_lock);
	rxnet->kill_all_client_conns = true;
	spin_unlock(&rxnet->client_conn_cache_lock);

	del_timer_sync(&rxnet->client_conn_reap_timer);

	if (!rxrpc_queue_work(&rxnet->client_conn_reaper))
		_debug("destroy: queue failed");

	_leave("");
}

/*
 * Clean up the client connections on a local endpoint.
 */
void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn, *tmp;
	struct rxrpc_net *rxnet = local->rxnet;
	LIST_HEAD(graveyard);

	_enter("");

	spin_lock(&rxnet->client_conn_cache_lock);

	list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns,
				 cache_link) {
		if (conn->params.local == local) {
			trace_rxrpc_client(conn, -1, rxrpc_client_discard);
			list_move(&conn->cache_link, &graveyard);
		}
	}

	spin_unlock(&rxnet->client_conn_cache_lock);

	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next,
				  struct rxrpc_connection, cache_link);
		list_del_init(&conn->cache_link);
		rxrpc_unbundle_conn(conn);
		rxrpc_put_connection(conn);
	}

	_leave(" [culled]");
}