// SPDX-License-Identifier: GPL-2.0-or-later
/* Local endpoint object management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static void rxrpc_local_processor(struct work_struct *);
static void rxrpc_local_rcu(struct rcu_head *);

/*
 * Compare a local to an address. Return -ve, 0 or +ve to indicate less than,
 * same or greater than.
 *
 * We explicitly don't compare the RxRPC service ID as we want to reject
 * conflicting uses by differing services. Further, we don't want to share
 * addresses with different options (IPv6), so we don't compare those bits
 * either.
 */
static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
				const struct sockaddr_rxrpc *srx)
{
	long diff;

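	/* The first non-zero difference wins; "a ?: b" is the GCC extension
	 * that yields a if a is non-zero and b otherwise.
	 */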
	diff = ((local->srx.transport_type - srx->transport_type) ?:
		(local->srx.transport_len - srx->transport_len) ?:
		(local->srx.transport.family - srx->transport.family));
	if (diff != 0)
		return diff;

	switch (srx->transport.family) {
	case AF_INET:
		/* If the choice of UDP port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin.sin_port -
			(u16 __force)srx->transport.sin.sin_port) ?:
			memcmp(&local->srx.transport.sin.sin_addr,
			       &srx->transport.sin.sin_addr,
			       sizeof(struct in_addr));
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		/* If the choice of UDP6 port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin6.sin6_port -
			(u16 __force)srx->transport.sin6.sin6_port) ?:
			memcmp(&local->srx.transport.sin6.sin6_addr,
			       &srx->transport.sin6.sin6_addr,
			       sizeof(struct in6_addr));
#endif
	default:
		BUG();
	}
}

/*
 * Allocate a new local endpoint.
 */
static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
					     const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;

	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
	if (local) {
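		/* Two counts are kept: ->ref pins the memory and
		 * ->active_users pins the transport socket.  The socket is
		 * torn down when the last user goes away; the record persists
		 * until the last ref is put.
		 */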
		refcount_set(&local->ref, 1);
		atomic_set(&local->active_users, 1);
		local->rxnet = rxnet;
		INIT_HLIST_NODE(&local->link);
		INIT_WORK(&local->processor, rxrpc_local_processor);
		init_rwsem(&local->defrag_sem);
		skb_queue_head_init(&local->reject_queue);
		skb_queue_head_init(&local->event_queue);
		local->client_bundles = RB_ROOT;
		spin_lock_init(&local->client_bundles_lock);
		spin_lock_init(&local->lock);
		rwlock_init(&local->services_lock);
		local->debug_id = atomic_inc_return(&rxrpc_debug_id);
		memcpy(&local->srx, srx, sizeof(*srx));
		local->srx.srx_service = 0;
		trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL);
	}

	_leave(" = %p", local);
	return local;
}

/*
 * create the local socket
 * - must be called with rxrpc_local_mutex locked
 */
static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
{
	struct udp_tunnel_sock_cfg tuncfg = {NULL};
	struct sockaddr_rxrpc *srx = &local->srx;
	struct udp_port_cfg udp_conf = {0};
	struct sock *usk;
	int ret;

	_enter("%p{%d,%d}",
	       local, srx->transport_type, srx->transport.family);

	udp_conf.family = srx->transport.family;
	udp_conf.use_udp_checksums = true;
	if (udp_conf.family == AF_INET) {
		udp_conf.local_ip = srx->transport.sin.sin_addr;
		udp_conf.local_udp_port = srx->transport.sin.sin_port;
#if IS_ENABLED(CONFIG_AF_RXRPC_IPV6)
	} else {
		udp_conf.local_ip6 = srx->transport.sin6.sin6_addr;
		udp_conf.local_udp_port = srx->transport.sin6.sin6_port;
		udp_conf.use_udp6_tx_checksums = true;
		udp_conf.use_udp6_rx_checksums = true;
#endif
	}
	ret = udp_sock_create(net, &udp_conf, &local->socket);
	if (ret < 0) {
		_leave(" = %d [socket]", ret);
		return ret;
	}

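	/* Hook the socket up as a UDP tunnel so that received packets and
	 * encapsulation errors are delivered directly to rxrpc's handlers.
	 */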
	tuncfg.encap_type = UDP_ENCAP_RXRPC;
	tuncfg.encap_rcv = rxrpc_input_packet;
	tuncfg.encap_err_rcv = rxrpc_encap_err_rcv;
	tuncfg.sk_user_data = local;
	setup_udp_tunnel_sock(net, local->socket, &tuncfg);

	/* set the socket up */
	usk = local->socket->sk;
	usk->sk_error_report = rxrpc_error_report;

	switch (srx->transport.family) {
	case AF_INET6:
		/* we want to receive ICMPv6 errors */
		ip6_sock_set_recverr(usk);

		/* Fall through and set IPv4 options too otherwise we don't get
		 * errors from IPv4 packets sent through the IPv6 socket.
		 */
		fallthrough;
	case AF_INET:
		/* we want to receive ICMP errors */
		ip_sock_set_recverr(usk);

		/* we want to set the don't fragment bit */
		ip_sock_set_mtu_discover(usk, IP_PMTUDISC_DO);

		/* We want receive timestamps. */
		sock_enable_timestamps(usk);
		break;

	default:
		BUG();
	}

	_leave(" = 0");
	return 0;
}

/*
 * Look up or create a new local endpoint using the specified local address.
 */
struct rxrpc_local *rxrpc_lookup_local(struct net *net,
				       const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;
	struct rxrpc_net *rxnet = rxrpc_net(net);
	struct hlist_node *cursor;
	const char *age;
	long diff;
	int ret;

	_enter("{%d,%d,%pISp}",
	       srx->transport_type, srx->transport.family, &srx->transport);

	mutex_lock(&rxnet->local_mutex);

	hlist_for_each(cursor, &rxnet->local_endpoints) {
		local = hlist_entry(cursor, struct rxrpc_local, link);

		diff = rxrpc_local_cmp_key(local, srx);
		if (diff != 0)
			continue;

		/* Services aren't allowed to share transport sockets, so
		 * reject that here. It is possible that the object is dying -
		 * but it may also still have the local transport address that
		 * we want bound.
		 */
		if (srx->srx_service) {
			local = NULL;
			goto addr_in_use;
		}

		/* Found a match. We want to replace a dying object.
		 * Attempting to bind the transport socket may still fail if
		 * we're attempting to use a local address that the dying
		 * object is still using.
		 */
		if (!rxrpc_use_local(local))
			break;

		age = "old";
		goto found;
	}

	local = rxrpc_alloc_local(rxnet, srx);
	if (!local)
		goto nomem;

	ret = rxrpc_open_socket(local, net);
	if (ret < 0)
		goto sock_error;

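	/* cursor is non-NULL only if we broke out of the walk above on
	 * finding a dying endpoint to supplant; if the walk ran to
	 * completion, it was left NULL.
	 */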
	if (cursor) {
		hlist_replace_rcu(cursor, &local->link);
		cursor->pprev = NULL;
	} else {
		hlist_add_head_rcu(&local->link, &rxnet->local_endpoints);
	}
	age = "new";

found:
	mutex_unlock(&rxnet->local_mutex);

	_net("LOCAL %s %d {%pISp}",
	     age, local->debug_id, &local->srx.transport);

	_leave(" = %p", local);
	return local;

nomem:
	ret = -ENOMEM;
sock_error:
	mutex_unlock(&rxnet->local_mutex);
	if (local)
		call_rcu(&local->rcu, rxrpc_local_rcu);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

addr_in_use:
	mutex_unlock(&rxnet->local_mutex);
	_leave(" = -EADDRINUSE");
	return ERR_PTR(-EADDRINUSE);
}

/*
 * Get a ref on a local endpoint.
 */
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);
	int r;

	__refcount_inc(&local->ref, &r);
	trace_rxrpc_local(local->debug_id, rxrpc_local_got, r + 1, here);
	return local;
}

/*
 * Get a ref on a local endpoint unless its usage has already reached 0.
 */
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);
	int r;

	if (local) {
		if (__refcount_inc_not_zero(&local->ref, &r))
			trace_rxrpc_local(local->debug_id, rxrpc_local_got,
					  r + 1, here);
		else
			local = NULL;
	}
	return local;
}

/*
 * Queue a local endpoint and pass the caller's reference to the work item.
 */
void rxrpc_queue_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);
	unsigned int debug_id = local->debug_id;
	int r = refcount_read(&local->ref);

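	/* If the work item was already queued, it already holds a ref, so
	 * discard the caller's ref rather than leaking it.
	 */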
	if (rxrpc_queue_work(&local->processor))
		trace_rxrpc_local(debug_id, rxrpc_local_queued, r + 1, here);
	else
		rxrpc_put_local(local);
}

/*
 * Drop a ref on a local endpoint.
 */
void rxrpc_put_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);
	unsigned int debug_id;
	bool dead;
	int r;

	if (local) {
		debug_id = local->debug_id;

		dead = __refcount_dec_and_test(&local->ref, &r);
		trace_rxrpc_local(debug_id, rxrpc_local_put, r, here);

		if (dead)
			call_rcu(&local->rcu, rxrpc_local_rcu);
	}
}

/*
 * Start using a local endpoint.
 */
struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
{
	local = rxrpc_get_local_maybe(local);
	if (!local)
		return NULL;

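	/* We now hold a ref; try to bump the active-user count too, backing
	 * off if the endpoint is already being shut down.
	 */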
	if (!__rxrpc_use_local(local)) {
		rxrpc_put_local(local);
		return NULL;
	}

	return local;
}

/*
 * Cease using a local endpoint. Once the number of active users reaches 0, we
 * start the closure of the transport in the work processor.
 */
void rxrpc_unuse_local(struct rxrpc_local *local)
{
	if (local) {
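		/* If this was the last active user, pass a ref to the work
		 * item so that the processor can tear the transport down.
		 */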
		if (__rxrpc_unuse_local(local)) {
			rxrpc_get_local(local);
			rxrpc_queue_local(local);
		}
	}
}

/*
 * Destroy a local endpoint's socket and then hand the record to RCU to dispose
 * of.
 *
 * Closing the socket cannot be done from bottom half context or RCU callback
 * context because it might sleep.
 */
static void rxrpc_local_destroyer(struct rxrpc_local *local)
{
	struct socket *socket = local->socket;
	struct rxrpc_net *rxnet = local->rxnet;

	_enter("%d", local->debug_id);

	local->dead = true;

	mutex_lock(&rxnet->local_mutex);
	hlist_del_init_rcu(&local->link);
	mutex_unlock(&rxnet->local_mutex);

	rxrpc_clean_up_local_conns(local);
	rxrpc_service_connection_reaper(&rxnet->service_conn_reaper);
	ASSERT(!local->service);

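	/* Shut the socket down before releasing it and detach it from the
	 * encap hook so that no further packets reach this endpoint.
	 */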
	if (socket) {
		local->socket = NULL;
		kernel_sock_shutdown(socket, SHUT_RDWR);
		socket->sk->sk_user_data = NULL;
		sock_release(socket);
	}

	/* At this point, there should be no more packets coming in to the
	 * local endpoint.
	 */
	rxrpc_purge_queue(&local->reject_queue);
	rxrpc_purge_queue(&local->event_queue);
}

/*
 * Process events on an endpoint. The work item carries a ref which
 * we must release.
 */
static void rxrpc_local_processor(struct work_struct *work)
{
	struct rxrpc_local *local =
		container_of(work, struct rxrpc_local, processor);
	bool again;

	if (local->dead)
		return;

	trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
			  refcount_read(&local->ref), NULL);

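	/* Keep going round while work shows up; if the active-user count has
	 * hit zero, tear the endpoint down instead.
	 */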
	do {
		again = false;
		if (!__rxrpc_use_local(local)) {
			rxrpc_local_destroyer(local);
			break;
		}

		if (!skb_queue_empty(&local->reject_queue)) {
			rxrpc_reject_packets(local);
			again = true;
		}

		if (!skb_queue_empty(&local->event_queue)) {
			rxrpc_process_local_events(local);
			again = true;
		}

		__rxrpc_unuse_local(local);
	} while (again);

	rxrpc_put_local(local);
}

/*
 * Destroy a local endpoint after the RCU grace period expires.
 */
static void rxrpc_local_rcu(struct rcu_head *rcu)
{
	struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu);

	_enter("%d", local->debug_id);

	ASSERT(!work_pending(&local->processor));

	_net("DESTROY LOCAL %d", local->debug_id);
	kfree(local);
	_leave("");
}

/*
 * Verify the local endpoint list is empty by this point.
 */
void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet)
{
	struct rxrpc_local *local;

	_enter("");

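	/* Let any endpoint teardown already queued on the workqueue finish
	 * before checking for leaks.
	 */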
	flush_workqueue(rxrpc_workqueue);

	if (!hlist_empty(&rxnet->local_endpoints)) {
		mutex_lock(&rxnet->local_mutex);
		hlist_for_each_entry(local, &rxnet->local_endpoints, link) {
			pr_err("AF_RXRPC: Leaked local %p {%d}\n",
			       local, refcount_read(&local->ref));
		}
		mutex_unlock(&rxnet->local_mutex);
		BUG();
	}
}