// SPDX-License-Identifier: GPL-2.0-or-later
/* Local endpoint object management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/udp.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static void rxrpc_local_processor(struct work_struct *);
static void rxrpc_local_rcu(struct rcu_head *);

/*
 * Compare a local to an address.  Return -ve, 0 or +ve to indicate less than,
 * same or greater than.
 *
 * We explicitly don't compare the RxRPC service ID as we want to reject
 * conflicting uses by differing services.  Further, we don't want to share
 * addresses with different options (IPv6), so we don't compare those bits
 * either.
 */
static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
				const struct sockaddr_rxrpc *srx)
{
	long diff;

	diff = ((local->srx.transport_type - srx->transport_type) ?:
		(local->srx.transport_len - srx->transport_len) ?:
		(local->srx.transport.family - srx->transport.family));
	if (diff != 0)
		return diff;

	switch (srx->transport.family) {
	case AF_INET:
		/* If the choice of UDP port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin.sin_port -
			(u16 __force)srx->transport.sin.sin_port) ?:
			memcmp(&local->srx.transport.sin.sin_addr,
			       &srx->transport.sin.sin_addr,
			       sizeof(struct in_addr));
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		/* If the choice of UDP6 port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin6.sin6_port -
			(u16 __force)srx->transport.sin6.sin6_port) ?:
			memcmp(&local->srx.transport.sin6.sin6_addr,
			       &srx->transport.sin6.sin6_addr,
			       sizeof(struct in6_addr));
#endif
	default:
		BUG();
	}
}

/*
 * Allocate a new local endpoint.
 */
static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
					     const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;

	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
	if (local) {
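		/* The caller gets the initial reference and counts as the
		 * first active user of the endpoint.
		 */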
		refcount_set(&local->ref, 1);
		atomic_set(&local->active_users, 1);
		local->rxnet = rxnet;
		INIT_HLIST_NODE(&local->link);
		INIT_WORK(&local->processor, rxrpc_local_processor);
		init_rwsem(&local->defrag_sem);
		skb_queue_head_init(&local->reject_queue);
		skb_queue_head_init(&local->event_queue);
		local->client_bundles = RB_ROOT;
		spin_lock_init(&local->client_bundles_lock);
		spin_lock_init(&local->lock);
		rwlock_init(&local->services_lock);
		local->debug_id = atomic_inc_return(&rxrpc_debug_id);
		memcpy(&local->srx, srx, sizeof(*srx));
		local->srx.srx_service = 0;
		trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL);
	}

	_leave(" = %p", local);
	return local;
}

/*
 * create the local socket
 * - must be called with rxrpc_local_mutex locked
 */
static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
{
	struct sock *usk;
	int ret;

	_enter("%p{%d,%d}",
	       local, local->srx.transport_type, local->srx.transport.family);

	/* create a socket to represent the local endpoint */
	ret = sock_create_kern(net, local->srx.transport.family,
			       local->srx.transport_type, 0, &local->socket);
	if (ret < 0) {
		_leave(" = %d [socket]", ret);
		return ret;
	}

	/* set the socket up */
	usk = local->socket->sk;
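	/* Don't loop outgoing multicast packets back to this socket. */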
	inet_sk(usk)->mc_loop = 0;

	/* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
	inet_inc_convert_csum(usk);

	rcu_assign_sk_user_data(usk, local);

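	/* Divert packets received on this UDP socket into rxrpc_input_packet()
	 * via the encapsulation hooks.
	 */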
	udp_sk(usk)->encap_type = UDP_ENCAP_RXRPC;
	udp_sk(usk)->encap_rcv = rxrpc_input_packet;
	udp_sk(usk)->encap_destroy = NULL;
	udp_sk(usk)->gro_receive = NULL;
	udp_sk(usk)->gro_complete = NULL;

	udp_encap_enable();
#if IS_ENABLED(CONFIG_AF_RXRPC_IPV6)
	if (local->srx.transport.family == AF_INET6)
		udpv6_encap_enable();
#endif
	usk->sk_error_report = rxrpc_error_report;

	/* if a local address was supplied then bind it */
	if (local->srx.transport_len > sizeof(sa_family_t)) {
		_debug("bind");
		ret = kernel_bind(local->socket,
				  (struct sockaddr *)&local->srx.transport,
				  local->srx.transport_len);
		if (ret < 0) {
			_debug("bind failed %d", ret);
			goto error;
		}
	}

	switch (local->srx.transport.family) {
	case AF_INET6:
		/* we want to receive ICMPv6 errors */
		ip6_sock_set_recverr(local->socket->sk);

		/* Fall through and set IPv4 options too otherwise we don't get
		 * errors from IPv4 packets sent through the IPv6 socket.
		 */
		fallthrough;
	case AF_INET:
		/* we want to receive ICMP errors */
		ip_sock_set_recverr(local->socket->sk);

		/* we want to set the don't fragment bit */
		ip_sock_set_mtu_discover(local->socket->sk, IP_PMTUDISC_DO);

		/* We want receive timestamps. */
		sock_enable_timestamps(local->socket->sk);
		break;

	default:
		BUG();
	}

	_leave(" = 0");
	return 0;

error:
	kernel_sock_shutdown(local->socket, SHUT_RDWR);
	local->socket->sk->sk_user_data = NULL;
	sock_release(local->socket);
	local->socket = NULL;

	_leave(" = %d", ret);
	return ret;
}

/*
 * Look up or create a new local endpoint using the specified local address.
 */
struct rxrpc_local *rxrpc_lookup_local(struct net *net,
				       const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;
	struct rxrpc_net *rxnet = rxrpc_net(net);
	struct hlist_node *cursor;
	const char *age;
	long diff;
	int ret;

	_enter("{%d,%d,%pISp}",
	       srx->transport_type, srx->transport.family, &srx->transport);

	mutex_lock(&rxnet->local_mutex);

	hlist_for_each(cursor, &rxnet->local_endpoints) {
		local = hlist_entry(cursor, struct rxrpc_local, link);

		diff = rxrpc_local_cmp_key(local, srx);
		if (diff != 0)
			continue;

		/* Services aren't allowed to share transport sockets, so
		 * reject that here.  It is possible that the object is dying -
		 * but it may also still have the local transport address that
		 * we want bound.
		 */
		if (srx->srx_service) {
			local = NULL;
			goto addr_in_use;
		}

		/* Found a match.  We want to replace a dying object.
		 * Attempting to bind the transport socket may still fail if
		 * we're attempting to use a local address that the dying
		 * object is still using.
		 */
		if (!rxrpc_use_local(local))
			break;

		age = "old";
		goto found;
	}

	local = rxrpc_alloc_local(rxnet, srx);
	if (!local)
		goto nomem;

	ret = rxrpc_open_socket(local, net);
	if (ret < 0)
		goto sock_error;

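	/* If the loop above stopped at a dying endpoint with a matching
	 * address, replace that entry in the list; otherwise add the new
	 * endpoint at the head.
	 */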
	if (cursor) {
		hlist_replace_rcu(cursor, &local->link);
		cursor->pprev = NULL;
	} else {
		hlist_add_head_rcu(&local->link, &rxnet->local_endpoints);
	}
	age = "new";

found:
	mutex_unlock(&rxnet->local_mutex);

	_net("LOCAL %s %d {%pISp}",
	     age, local->debug_id, &local->srx.transport);

	_leave(" = %p", local);
	return local;

nomem:
	ret = -ENOMEM;
sock_error:
	mutex_unlock(&rxnet->local_mutex);
	if (local)
		call_rcu(&local->rcu, rxrpc_local_rcu);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

addr_in_use:
	mutex_unlock(&rxnet->local_mutex);
	_leave(" = -EADDRINUSE");
	return ERR_PTR(-EADDRINUSE);
}

/*
 * Get a ref on a local endpoint.
 */
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);
	int r;

	__refcount_inc(&local->ref, &r);
	trace_rxrpc_local(local->debug_id, rxrpc_local_got, r + 1, here);
	return local;
}

/*
 * Get a ref on a local endpoint unless its usage has already reached 0.
 */
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);
	int r;

	if (local) {
		if (__refcount_inc_not_zero(&local->ref, &r))
			trace_rxrpc_local(local->debug_id, rxrpc_local_got,
					  r + 1, here);
		else
			local = NULL;
	}
	return local;
}

/*
 * Queue a local endpoint and pass the caller's reference to the work item.
 */
void rxrpc_queue_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);
	unsigned int debug_id = local->debug_id;
	int r = refcount_read(&local->ref);

	if (rxrpc_queue_work(&local->processor))
		trace_rxrpc_local(debug_id, rxrpc_local_queued, r + 1, here);
	else
		rxrpc_put_local(local);
}

/*
 * Drop a ref on a local endpoint.
 */
void rxrpc_put_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);
	unsigned int debug_id;
	bool dead;
	int r;

	if (local) {
		debug_id = local->debug_id;

		dead = __refcount_dec_and_test(&local->ref, &r);
		trace_rxrpc_local(debug_id, rxrpc_local_put, r, here);

		if (dead)
			call_rcu(&local->rcu, rxrpc_local_rcu);
	}
}

/*
 * Start using a local endpoint.
 */
struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
{
	local = rxrpc_get_local_maybe(local);
	if (!local)
		return NULL;

	if (!__rxrpc_use_local(local)) {
		rxrpc_put_local(local);
		return NULL;
	}

	return local;
}

/*
 * Cease using a local endpoint.  Once the number of active users reaches 0, we
 * start the closure of the transport in the work processor.
 */
void rxrpc_unuse_local(struct rxrpc_local *local)
{
	if (local) {
		if (__rxrpc_unuse_local(local)) {
			rxrpc_get_local(local);
			rxrpc_queue_local(local);
		}
	}
}

/*
 * Destroy a local endpoint's socket and then hand the record to RCU to dispose
 * of.
 *
 * Closing the socket cannot be done from bottom half context or RCU callback
 * context because it might sleep.
 */
static void rxrpc_local_destroyer(struct rxrpc_local *local)
{
	struct socket *socket = local->socket;
	struct rxrpc_net *rxnet = local->rxnet;

	_enter("%d", local->debug_id);

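	/* Mark the endpoint dead; the local processor bails out early once
	 * this is set.
	 */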
	local->dead = true;

	mutex_lock(&rxnet->local_mutex);
	hlist_del_init_rcu(&local->link);
	mutex_unlock(&rxnet->local_mutex);

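	/* Finish off any connections that are still using this endpoint; no
	 * service socket should be bound to it by this point.
	 */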
	rxrpc_clean_up_local_conns(local);
	rxrpc_service_connection_reaper(&rxnet->service_conn_reaper);
	ASSERT(!local->service);

	if (socket) {
		local->socket = NULL;
		kernel_sock_shutdown(socket, SHUT_RDWR);
		socket->sk->sk_user_data = NULL;
		sock_release(socket);
	}

	/* At this point, there should be no more packets coming in to the
	 * local endpoint.
	 */
	rxrpc_purge_queue(&local->reject_queue);
	rxrpc_purge_queue(&local->event_queue);
}

/*
 * Process events on an endpoint.  The work item carries a ref which
 * we must release.
 */
static void rxrpc_local_processor(struct work_struct *work)
{
	struct rxrpc_local *local =
		container_of(work, struct rxrpc_local, processor);
	bool again;

	if (local->dead)
		return;

	trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
			  refcount_read(&local->ref), NULL);

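	/* Keep going round while the queues have work in them; tear the
	 * endpoint down instead if it has no remaining active users.
	 */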
	do {
		again = false;
		if (!__rxrpc_use_local(local)) {
			rxrpc_local_destroyer(local);
			break;
		}

		if (!skb_queue_empty(&local->reject_queue)) {
			rxrpc_reject_packets(local);
			again = true;
		}

		if (!skb_queue_empty(&local->event_queue)) {
			rxrpc_process_local_events(local);
			again = true;
		}

		__rxrpc_unuse_local(local);
	} while (again);

	rxrpc_put_local(local);
}

/*
 * Destroy a local endpoint after the RCU grace period expires.
 */
static void rxrpc_local_rcu(struct rcu_head *rcu)
{
	struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu);

	_enter("%d", local->debug_id);

	ASSERT(!work_pending(&local->processor));

	_net("DESTROY LOCAL %d", local->debug_id);
	kfree(local);
	_leave("");
}

/*
 * Verify the local endpoint list is empty by this point.
 */
void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet)
{
	struct rxrpc_local *local;

	_enter("");

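	/* Let outstanding work on the rxrpc workqueue finish before checking
	 * for leaked endpoints.
	 */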
	flush_workqueue(rxrpc_workqueue);

	if (!hlist_empty(&rxnet->local_endpoints)) {
		mutex_lock(&rxnet->local_mutex);
		hlist_for_each_entry(local, &rxnet->local_endpoints, link) {
			pr_err("AF_RXRPC: Leaked local %p {%d}\n",
			       local, refcount_read(&local->ref));
		}
		mutex_unlock(&rxnet->local_mutex);
		BUG();
	}
}