// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC remote transport endpoint record management
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/ip6_route.h>
#include "ar-internal.h"

/*
 * Hash a peer key.
 */
static unsigned long rxrpc_peer_hash_key(struct rxrpc_local *local,
					 const struct sockaddr_rxrpc *srx)
{
	const u16 *p;
	unsigned int i, size;
	unsigned long hash_key;

	_enter("");

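	/* Fold the local endpoint pointer into the key, scaled down by its
	 * alignment so that the always-zero low-order bits don't weaken the
	 * hash.
	 */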
	hash_key = (unsigned long)local / __alignof__(*local);
	hash_key += srx->transport_type;
	hash_key += srx->transport_len;
	hash_key += srx->transport.family;

	switch (srx->transport.family) {
	case AF_INET:
		hash_key += (u16 __force)srx->transport.sin.sin_port;
		size = sizeof(srx->transport.sin.sin_addr);
		p = (u16 *)&srx->transport.sin.sin_addr;
		break;
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		hash_key += (u16 __force)srx->transport.sin.sin_port;
		size = sizeof(srx->transport.sin6.sin6_addr);
		p = (u16 *)&srx->transport.sin6.sin6_addr;
		break;
#endif
	default:
		WARN(1, "AF_RXRPC: Unsupported transport address family\n");
		return 0;
	}

	/* Step through the peer address in 16-bit portions for speed */
	for (i = 0; i < size; i += sizeof(*p), p++)
		hash_key += *p;

	_leave(" 0x%lx", hash_key);
	return hash_key;
}

/*
 * Compare a peer to a key.  Return -ve, 0 or +ve to indicate less than, same
 * or greater than.
 *
 * Unfortunately, the primitives in linux/hashtable.h don't allow for sorted
 * buckets and mid-bucket insertion, so we don't make full use of this
 * information at this point.
 */
static long rxrpc_peer_cmp_key(const struct rxrpc_peer *peer,
			       struct rxrpc_local *local,
			       const struct sockaddr_rxrpc *srx,
			       unsigned long hash_key)
{
	long diff;

	diff = ((peer->hash_key - hash_key) ?:
		((unsigned long)peer->local - (unsigned long)local) ?:
		(peer->srx.transport_type - srx->transport_type) ?:
		(peer->srx.transport_len - srx->transport_len) ?:
		(peer->srx.transport.family - srx->transport.family));
	if (diff != 0)
		return diff;

	switch (srx->transport.family) {
	case AF_INET:
		return ((u16 __force)peer->srx.transport.sin.sin_port -
			(u16 __force)srx->transport.sin.sin_port) ?:
			memcmp(&peer->srx.transport.sin.sin_addr,
			       &srx->transport.sin.sin_addr,
			       sizeof(struct in_addr));
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		return ((u16 __force)peer->srx.transport.sin6.sin6_port -
			(u16 __force)srx->transport.sin6.sin6_port) ?:
			memcmp(&peer->srx.transport.sin6.sin6_addr,
			       &srx->transport.sin6.sin6_addr,
			       sizeof(struct in6_addr));
#endif
	default:
		BUG();
	}
}

/*
 * Look up a remote transport endpoint for the specified address using RCU.
 */
static struct rxrpc_peer *__rxrpc_lookup_peer_rcu(
	struct rxrpc_local *local,
	const struct sockaddr_rxrpc *srx,
	unsigned long hash_key)
{
	struct rxrpc_peer *peer;
	struct rxrpc_net *rxnet = local->rxnet;

	hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) {
		if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 &&
		    atomic_read(&peer->usage) > 0)
			return peer;
	}

	return NULL;
}

/*
 * Look up a remote transport endpoint for the specified address using RCU.
 */
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
					 const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_peer *peer;
	unsigned long hash_key = rxrpc_peer_hash_key(local, srx);

	peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
	if (peer) {
		_net("PEER %d {%pISp}", peer->debug_id, &peer->srx.transport);
		_leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
	}
	return peer;
}

/*
 * assess the MTU size for the network interface through which this peer is
 * reached
 */
static void rxrpc_assess_MTU_size(struct rxrpc_sock *rx,
				  struct rxrpc_peer *peer)
{
	struct net *net = sock_net(&rx->sk);
	struct dst_entry *dst;
	struct rtable *rt;
	struct flowi fl;
	struct flowi4 *fl4 = &fl.u.ip4;
#ifdef CONFIG_AF_RXRPC_IPV6
	struct flowi6 *fl6 = &fl.u.ip6;
#endif

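	/* Default to the standard Ethernet MTU; a successful route lookup
	 * below replaces this with the MTU of the path to the peer.
	 */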
	peer->if_mtu = 1500;

	memset(&fl, 0, sizeof(fl));
	switch (peer->srx.transport.family) {
	case AF_INET:
		rt = ip_route_output_ports(
			net, fl4, NULL,
			peer->srx.transport.sin.sin_addr.s_addr, 0,
			htons(7000), htons(7001), IPPROTO_UDP, 0, 0);
		if (IS_ERR(rt)) {
			_leave(" [route err %ld]", PTR_ERR(rt));
			return;
		}
		dst = &rt->dst;
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		fl6->flowi6_iif = LOOPBACK_IFINDEX;
		fl6->flowi6_scope = RT_SCOPE_UNIVERSE;
		fl6->flowi6_proto = IPPROTO_UDP;
		memcpy(&fl6->daddr, &peer->srx.transport.sin6.sin6_addr,
		       sizeof(struct in6_addr));
		fl6->fl6_dport = htons(7001);
		fl6->fl6_sport = htons(7000);
		dst = ip6_route_output(net, NULL, fl6);
		if (dst->error) {
			_leave(" [route err %d]", dst->error);
			return;
		}
		break;
#endif

	default:
		BUG();
	}

	peer->if_mtu = dst_mtu(dst);
	dst_release(dst);

	_leave(" [if_mtu %u]", peer->if_mtu);
}

/*
 * Allocate a peer.
 */
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
{
	struct rxrpc_peer *peer;

	_enter("");

	peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
	if (peer) {
		atomic_set(&peer->usage, 1);
		peer->local = rxrpc_get_local(local);
		INIT_HLIST_HEAD(&peer->error_targets);
		peer->service_conns = RB_ROOT;
		seqlock_init(&peer->service_conn_lock);
		spin_lock_init(&peer->lock);
		spin_lock_init(&peer->rtt_input_lock);
		peer->debug_id = atomic_inc_return(&rxrpc_debug_id);

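		/* Pick the initial congestion window in packets; the segment
		 * size thresholds below match the RFC 5681 initial-window
		 * rules.
		 */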
		if (RXRPC_TX_SMSS > 2190)
			peer->cong_cwnd = 2;
		else if (RXRPC_TX_SMSS > 1095)
			peer->cong_cwnd = 3;
		else
			peer->cong_cwnd = 4;
	}

	_leave(" = %p", peer);
	return peer;
}

/*
 * Initialise peer record.
 */
static void rxrpc_init_peer(struct rxrpc_sock *rx, struct rxrpc_peer *peer,
			    unsigned long hash_key)
{
	peer->hash_key = hash_key;
	rxrpc_assess_MTU_size(rx, peer);
	peer->mtu = peer->if_mtu;
	peer->rtt_last_req = ktime_get_real();

	switch (peer->srx.transport.family) {
	case AF_INET:
		peer->hdrsize = sizeof(struct iphdr);
		break;
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		peer->hdrsize = sizeof(struct ipv6hdr);
		break;
#endif
	default:
		BUG();
	}

	switch (peer->srx.transport_type) {
	case SOCK_DGRAM:
		peer->hdrsize += sizeof(struct udphdr);
		break;
	default:
		BUG();
	}

	peer->hdrsize += sizeof(struct rxrpc_wire_header);
	peer->maxdata = peer->mtu - peer->hdrsize;
}

/*
 * Set up a new peer.
 */
static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
					    struct rxrpc_local *local,
					    struct sockaddr_rxrpc *srx,
					    unsigned long hash_key,
					    gfp_t gfp)
{
	struct rxrpc_peer *peer;

	_enter("");

	peer = rxrpc_alloc_peer(local, gfp);
	if (peer) {
		memcpy(&peer->srx, srx, sizeof(*srx));
		rxrpc_init_peer(rx, peer, hash_key);
	}

	_leave(" = %p", peer);
	return peer;
}

/*
 * Set up a new incoming peer.  There shouldn't be any other matching peers
 * since we've already done a search in the list from the non-reentrant context
 * (the data_ready handler) that is the only place we can add new peers.
 */
void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local,
			     struct rxrpc_peer *peer)
{
	struct rxrpc_net *rxnet = local->rxnet;
	unsigned long hash_key;

	hash_key = rxrpc_peer_hash_key(local, &peer->srx);
	rxrpc_init_peer(rx, peer, hash_key);

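	/* The peer hash is modified under peer_hash_lock; lookups are done
	 * under RCU.
	 */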
	spin_lock(&rxnet->peer_hash_lock);
	hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
	list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
	spin_unlock(&rxnet->peer_hash_lock);
}

/*
 * obtain a remote transport endpoint for the specified address
 */
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
				     struct rxrpc_local *local,
				     struct sockaddr_rxrpc *srx, gfp_t gfp)
{
	struct rxrpc_peer *peer, *candidate;
	struct rxrpc_net *rxnet = local->rxnet;
	unsigned long hash_key = rxrpc_peer_hash_key(local, srx);

	_enter("{%pISp}", &srx->transport);

	/* search the peer list first */
	rcu_read_lock();
	peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
	if (peer && !rxrpc_get_peer_maybe(peer))
		peer = NULL;
	rcu_read_unlock();

	if (!peer) {
		/* The peer is not yet present in hash - create a candidate
		 * for a new record and then redo the search.
		 */
		candidate = rxrpc_create_peer(rx, local, srx, hash_key, gfp);
		if (!candidate) {
			_leave(" = NULL [nomem]");
			return NULL;
		}

		spin_lock_bh(&rxnet->peer_hash_lock);

		/* Need to check that we aren't racing with someone else */
		peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
		if (peer && !rxrpc_get_peer_maybe(peer))
			peer = NULL;
		if (!peer) {
			hash_add_rcu(rxnet->peer_hash,
				     &candidate->hash_link, hash_key);
			list_add_tail(&candidate->keepalive_link,
				      &rxnet->peer_keepalive_new);
		}

		spin_unlock_bh(&rxnet->peer_hash_lock);

		if (peer)
			kfree(candidate);
		else
			peer = candidate;
	}

	_net("PEER %d {%pISp}", peer->debug_id, &peer->srx.transport);

	_leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
	return peer;
}

/*
 * Get a ref on a peer record.
 */
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
{
	const void *here = __builtin_return_address(0);
	int n;

	n = atomic_inc_return(&peer->usage);
	trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n, here);
	return peer;
}

/*
 * Get a ref on a peer record unless its usage has already reached 0.
 */
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
{
	const void *here = __builtin_return_address(0);

	if (peer) {
		int n = atomic_fetch_add_unless(&peer->usage, 1, 0);
		if (n > 0)
			trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n + 1, here);
		else
			peer = NULL;
	}
	return peer;
}

/*
 * Discard a peer record.
 */
static void __rxrpc_put_peer(struct rxrpc_peer *peer)
{
	struct rxrpc_net *rxnet = peer->local->rxnet;

	ASSERT(hlist_empty(&peer->error_targets));

	spin_lock_bh(&rxnet->peer_hash_lock);
	hash_del_rcu(&peer->hash_link);
	list_del_init(&peer->keepalive_link);
	spin_unlock_bh(&rxnet->peer_hash_lock);

	rxrpc_put_local(peer->local);
	kfree_rcu(peer, rcu);
}

/*
 * Drop a ref on a peer record.
 */
void rxrpc_put_peer(struct rxrpc_peer *peer)
{
	const void *here = __builtin_return_address(0);
	unsigned int debug_id;
	int n;

	if (peer) {
		debug_id = peer->debug_id;
		n = atomic_dec_return(&peer->usage);
		trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here);
		if (n == 0)
			__rxrpc_put_peer(peer);
	}
}

/*
 * Drop a ref on a peer record where the caller already holds the
 * peer_hash_lock.
 */
void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
{
	const void *here = __builtin_return_address(0);
	unsigned int debug_id = peer->debug_id;
	int n;

	n = atomic_dec_return(&peer->usage);
	trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here);
	if (n == 0) {
		hash_del_rcu(&peer->hash_link);
		list_del_init(&peer->keepalive_link);
		rxrpc_put_local(peer->local);
		kfree_rcu(peer, rcu);
	}
}

/*
 * Make sure all peer records have been discarded.
 */
void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)
{
	struct rxrpc_peer *peer;
	int i;

	for (i = 0; i < HASH_SIZE(rxnet->peer_hash); i++) {
		if (hlist_empty(&rxnet->peer_hash[i]))
			continue;

		hlist_for_each_entry(peer, &rxnet->peer_hash[i], hash_link) {
			pr_err("Leaked peer %u {%u} %pISp\n",
			       peer->debug_id,
			       atomic_read(&peer->usage),
			       &peer->srx.transport);
		}
	}
}

/**
 * rxrpc_kernel_get_peer - Get the peer address of a call
 * @sock: The socket on which the call is in progress.
 * @call: The call to query
 * @_srx: Where to place the result
 *
 * Get the address of the remote peer in a call.
 */
void rxrpc_kernel_get_peer(struct socket *sock, struct rxrpc_call *call,
			   struct sockaddr_rxrpc *_srx)
{
	*_srx = call->peer->srx;
}
EXPORT_SYMBOL(rxrpc_kernel_get_peer);

/**
 * rxrpc_kernel_get_rtt - Get a call's peer RTT
 * @sock: The socket on which the call is in progress.
 * @call: The call to query
 *
 * Get the call's peer RTT.
 */
u64 rxrpc_kernel_get_rtt(struct socket *sock, struct rxrpc_call *call)
{
	return call->peer->rtt;
}
EXPORT_SYMBOL(rxrpc_kernel_get_rtt);